commit 37f91f7993af3d89e2cae9ab12a4cf5207e945f4
Author: Alan Gates
Date:   Mon Mar 26 11:29:07 2018 -0700

    HIVE-18755 Modifications to the metastore for catalogs

diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index b6fe9ceb56..a377805549 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -106,7 +106,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
       case HiveParser.TOK_TABLEPARTCOLS:
         List<FieldSchema> partCols = BaseSemanticAnalyzer
-            .getColumns(child, false);
+            .getColumns(child, false, context.getConf());
         for (FieldSchema fs : partCols) {
           if (!fs.getType().equalsIgnoreCase("string")) {
             throw new SemanticException(
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index d64718159b..8523428013 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -74,6 +74,8 @@
 
 import com.google.common.collect.Lists;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
 /**
  * An implementation of {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} that
  * stores events in the database.
@@ -140,6 +142,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.CREATE_TABLE.toString(), msgFactory
         .buildCreateTableMessage(t, new FileIterator(t.getSd().getLocation())).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, tableEvent);
@@ -155,6 +158,7 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_TABLE.toString(), msgFactory
         .buildDropTableMessage(t).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, tableEvent);
@@ -171,6 +175,7 @@ public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(), msgFactory
         .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp()).toString());
+    event.setCatName(after.isSetCatName() ? after.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(after.getDbName());
     event.setTableName(after.getTableName());
     process(event, tableEvent);
@@ -279,6 +284,7 @@ public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaExceptio
         new PartitionFilesIterator(partitionEvent.getPartitionIterator(), t)).toString();
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), msg);
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, partitionEvent);
@@ -294,6 +300,7 @@ public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaExcept
     NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_PARTITION.toString(), msgFactory
         .buildDropPartitionMessage(t, partitionEvent.getPartitionIterator()).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, partitionEvent);
@@ -310,6 +317,7 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaExce
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(), msgFactory
         .buildAlterPartitionMessage(partitionEvent.getTable(), before, after, partitionEvent.getIsTruncateOp()).toString());
+    event.setCatName(before.isSetCatName() ? before.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(before.getDbName());
     event.setTableName(before.getTableName());
     process(event, partitionEvent);
@@ -325,6 +333,7 @@ public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), msgFactory
         .buildCreateDatabaseMessage(db).toString());
+    event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
   }
@@ -339,6 +348,7 @@ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(), msgFactory
         .buildDropDatabaseMessage(db).toString());
+    event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
   }
@@ -354,6 +364,7 @@ public void onAlterDatabase(AlterDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ALTER_DATABASE.toString(), msgFactory
         .buildAlterDatabaseMessage(oldDb, newDb).toString());
+    event.setCatName(oldDb.isSetCatalogName() ? oldDb.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(oldDb.getName());
     process(event, dbEvent);
   }
@@ -368,6 +379,7 @@ public void onCreateFunction(CreateFunctionEvent fnEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.CREATE_FUNCTION.toString(), msgFactory
         .buildCreateFunctionMessage(fn).toString());
+    event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
   }
@@ -382,6 +394,7 @@ public void onDropFunction(DropFunctionEvent fnEvent) throws MetaException {
     NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_FUNCTION.toString(), msgFactory
         .buildDropFunctionMessage(fn).toString());
+    event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
   }
@@ -419,6 +432,7 @@ public void onInsert(InsertEvent insertEvent) throws MetaException {
             insertEvent.getPartitionObj(), insertEvent.isReplace(),
             new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()))
         .toString());
+    event.setCatName(tableObj.isSetCatName() ? tableObj.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(tableObj.getDbName());
     event.setTableName(tableObj.getTableName());
     process(event, insertEvent);
@@ -445,6 +459,7 @@ public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaEx
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_PRIMARYKEY.toString(), msgFactory
         .buildAddPrimaryKeyMessage(addPrimaryKeyEvent.getPrimaryKeyCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addPrimaryKeyEvent);
@@ -462,6 +477,7 @@ public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaEx
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_FOREIGNKEY.toString(), msgFactory
         .buildAddForeignKeyMessage(addForeignKeyEvent.getForeignKeyCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getPktable_db());
     event.setTableName(cols.get(0).getPktable_name());
     process(event, addForeignKeyEvent);
@@ -479,6 +495,7 @@ public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEv
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_UNIQUECONSTRAINT.toString(), msgFactory
         .buildAddUniqueConstraintMessage(addUniqueConstraintEvent.getUniqueConstraintCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addUniqueConstraintEvent);
@@ -496,6 +513,7 @@ public void onAddNotNullConstraint(AddNotNullConstraintEvent addNotNullConstrain
     NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_NOTNULLCONSTRAINT.toString(), msgFactory
         .buildAddNotNullConstraintMessage(addNotNullConstraintEvent.getNotNullConstraintCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addNotNullConstraintEvent);
@@ -514,6 +532,7 @@ public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws Met
     NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(), msgFactory
         .buildDropConstraintMessage(dbName, tableName, constraintName).toString());
+    event.setCatName(dropConstraintEvent.getCatName());
     event.setDbName(dbName);
     event.setTableName(tableName);
     process(event, dropConstraintEvent);
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index 53246a0eb5..649d901209 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -207,7 +207,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
       Configuration conf = handler.getConf();
       Table newTbl;
       try {
-        newTbl = handler.get_table_core(tbl.getDbName(), tbl.getTableName())
+        newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())
             .deepCopy();
         newTbl.getParameters().put(
             HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
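Every handler above normalizes the catalog the same way: use the catalog carried by the Thrift object if one was set, otherwise fall back to DEFAULT_CATALOG_NAME so that objects created by pre-catalog clients keep working. A minimal sketch of that idiom factored into a helper; the helper name is hypothetical, the commit itself inlines the ternary at each call site:

    // Hypothetical helper, not part of the commit: resolve the catalog for an
    // event source, defaulting to DEFAULT_CATALOG_NAME when the Thrift object
    // predates catalogs and has no catName set.
    private static String catNameOrDefault(Table t) {
      return t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME;
    }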
diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 5cc407ad68..4697f60209 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.hive.metastore.api.ISchemaName;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -146,6 +147,40 @@ public void rollbackTransaction() {
   }
 
   @Override
+  public void createCatalog(Catalog cat) throws MetaException {
+    if (shouldEventSucceed) {
+      objectStore.createCatalog(cat);
+    } else {
+      throw new RuntimeException("Failed event");
+    }
+  }
+
+  @Override
+  public void alterCatalog(String catName, Catalog cat) throws MetaException,
+      InvalidOperationException {
+    objectStore.alterCatalog(catName, cat);
+  }
+
+  @Override
+  public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    return objectStore.getCatalog(catalogName);
+  }
+
+  @Override
+  public List<String> getCatalogs() throws MetaException {
+    return objectStore.getCatalogs();
+  }
+
+  @Override
+  public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    if (shouldEventSucceed) {
+      objectStore.dropCatalog(catalogName);
+    } else {
+      throw new RuntimeException("Event failed.");
+    }
+  }
+
+  @Override
   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
       objectStore.createDatabase(db);
@@ -155,34 +190,34 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep
   }
 
   @Override
-  public Database getDatabase(String dbName) throws NoSuchObjectException {
-    return objectStore.getDatabase(dbName);
+  public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+    return objectStore.getDatabase(catName, dbName);
   }
 
   @Override
-  public boolean dropDatabase(String dbName)
+  public boolean dropDatabase(String catName, String dbName)
       throws NoSuchObjectException, MetaException {
     if (shouldEventSucceed) {
-      return objectStore.dropDatabase(dbName);
+      return objectStore.dropDatabase(catName, dbName);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public boolean alterDatabase(String dbName, Database db)
+  public boolean alterDatabase(String catName, String dbName, Database db)
       throws NoSuchObjectException, MetaException {
-    return objectStore.alterDatabase(dbName, db);
+    return objectStore.alterDatabase(catName, dbName, db);
   }
 
   @Override
-  public List<String> getDatabases(String pattern) throws MetaException {
-    return objectStore.getDatabases(pattern);
+  public List<String> getDatabases(String catName, String pattern) throws MetaException {
+    return objectStore.getDatabases(catName, pattern);
   }
 
   @Override
-  public List<String> getAllDatabases() throws MetaException {
-    return objectStore.getAllDatabases();
+  public List<String> getAllDatabases(String catName) throws MetaException {
+    return objectStore.getAllDatabases(catName);
   }
 
   @Override
@@ -210,19 +245,19 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException
   }
 
   @Override
-  public boolean dropTable(String dbName, String tableName)
+  public boolean dropTable(String catName, String dbName, String tableName)
       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      return objectStore.dropTable(dbName, tableName);
+      return objectStore.dropTable(catName, dbName, tableName);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public Table getTable(String dbName, String tableName) throws MetaException {
-    return objectStore.getTable(dbName, tableName);
+  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+    return objectStore.getTable(catName, dbName, tableName);
   }
 
   @Override
@@ -232,162 +267,159 @@ public boolean addPartition(Partition part)
   }
 
   @Override
-  public Partition getPartition(String dbName, String tableName, List<String> partVals)
+  public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartition(dbName, tableName, partVals);
+    return objectStore.getPartition(catName, dbName, tableName, partVals);
   }
 
   @Override
-  public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+  public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      return objectStore.dropPartition(dbName, tableName, partVals);
+      return objectStore.dropPartition(catName, dbName, tableName, partVals);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public List<Partition> getPartitions(String dbName, String tableName, int max)
+  public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitions(dbName, tableName, max);
+    return objectStore.getPartitions(catName, dbName, tableName, max);
   }
 
   @Override
-  public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+  public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
       throws MetaException {
-    objectStore.updateCreationMetadata(dbname, tablename, cm);
+    objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
   }
 
-  @Override
-  public void alterTable(String dbName, String name, Table newTable)
+  public void alterTable(String catName, String dbName, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterTable(dbName, name, newTable);
+      objectStore.alterTable(catName, dbName, name, newTable);
     } else {
       throw new RuntimeException("Event failed.");
    }
   }
 
   @Override
-  public List<String> getTables(String dbName, String pattern) throws MetaException {
-    return objectStore.getTables(dbName, pattern);
+  public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+    return objectStore.getTables(catName, dbName, pattern);
   }
 
   @Override
-  public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
-    return objectStore.getTables(dbName, pattern, tableType);
+  public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+    return objectStore.getTables(catName, dbName, pattern, tableType);
  }
 
   @Override
-  public List<String> getMaterializedViewsForRewriting(String dbName)
+  public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getMaterializedViewsForRewriting(dbName);
+    return objectStore.getMaterializedViewsForRewriting(catName, dbName);
   }
 
   @Override
-  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+  public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
       throws MetaException {
-    return objectStore.getTableMeta(dbNames, tableNames, tableTypes);
+    return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
   }
 
   @Override
-  public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+  public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
       throws MetaException, UnknownDBException {
-    return objectStore.getTableObjectsByName(dbName, tableNames);
+    return objectStore.getTableObjectsByName(catName, dbName, tableNames);
   }
 
   @Override
-  public List<String> getAllTables(String dbName) throws MetaException {
-    return objectStore.getAllTables(dbName);
+  public List<String> getAllTables(String catName, String dbName) throws MetaException {
+    return objectStore.getAllTables(catName, dbName);
   }
 
   @Override
-  public List<String> listTableNamesByFilter(String dbName, String filter,
+  public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
       short maxTables) throws MetaException, UnknownDBException {
-    return objectStore.listTableNamesByFilter(dbName, filter, maxTables);
+    return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
   }
 
   @Override
-  public List<String> listPartitionNames(String dbName, String tblName, short maxParts)
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
       throws MetaException {
-    return objectStore.listPartitionNames(dbName, tblName, maxParts);
+    return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
   }
 
   @Override
-  public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+  public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+                                                     String tbl_name, List<FieldSchema> cols,
+                                                     boolean applyDistinct, String filter,
+                                                     boolean ascending, List<FieldSchema> order,
+                                                     long maxParts) throws MetaException {
     return null;
   }
 
   @Override
-  public List<String> listPartitionNamesByFilter(String dbName, String tblName,
-      String filter, short maxParts) throws MetaException {
-    return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts);
-  }
-
-  @Override
-  public void alterPartition(String dbName, String tblName, List<String> partVals,
+  public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
       Partition newPart) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartition(dbName, tblName, partVals, newPart);
+      objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public void alterPartitions(String dbName, String tblName,
+  public void alterPartitions(String catName, String dbName, String tblName,
       List<List<String>> partValsList, List<Partition> newParts)
       throws InvalidObjectException, MetaException {
-    objectStore.alterPartitions(dbName, tblName, partValsList, newParts);
+    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
   }
 
   @Override
-  public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+  public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+    return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
   }
 
   @Override
-  public int getNumPartitionsByFilter(String dbName, String tblName,
+  public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
       String filter) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByFilter(dbName, tblName, filter);
+    return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
   }
 
   @Override
-  public int getNumPartitionsByExpr(String dbName, String tblName,
+  public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
       byte[] expr) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByExpr(dbName, tblName, expr);
+    return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
   }
 
   @Override
-  public List<Partition> getPartitionsByNames(String dbName, String tblName,
+  public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
       List<String> partNames) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionsByNames(dbName, tblName, partNames);
+    return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames);
   }
 
   @Override
-  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+  public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
-    return objectStore.getPartitionsByExpr(
+    return objectStore.getPartitionsByExpr(catName,
         dbName, tblName, expr, defaultPartitionName, maxParts, result);
   }
 
   @Override
-  public Table markPartitionForEvent(String dbName, String tblName,
+  public Table markPartitionForEvent(String catName, String dbName, String tblName,
       Map<String, String> partVals, PartitionEventType evtType)
       throws MetaException, UnknownTableException, InvalidPartitionException,
       UnknownPartitionException {
-    return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
+    return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
   }
 
   @Override
-  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+  public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
       Map<String, String> partName, PartitionEventType evtType)
       throws MetaException, UnknownTableException, InvalidPartitionException,
       UnknownPartitionException {
-    return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
+    return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
   }
 
   @Override
@@ -423,32 +455,32 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
   }
 
   @Override
-  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+  public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
       List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getDBPrivilegeSet(dbName, userName, groupNames);
+    return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
       String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
+    return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
       String partition, String userName, List<String> groupNames)
       throws InvalidObjectException, MetaException {
-    return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition,
+    return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition,
         userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
       String partitionName, String columnName, String userName,
       List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName,
+    return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName,
         columnName, userName, groupNames);
   }
 
@@ -460,40 +492,40 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
-      PrincipalType principalType, String dbName) {
-    return objectStore.listPrincipalDBGrants(principalName, principalType, dbName);
+      PrincipalType principalType, String catName, String dbName) {
+    return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
-      PrincipalType principalType, String dbName, String tableName) {
+      PrincipalType principalType, String catName, String dbName, String tableName) {
     return objectStore.listAllTableGrants(principalName, principalType,
-        dbName, tableName);
+        catName, dbName, tableName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
-      PrincipalType principalType, String dbName, String tableName,
+      PrincipalType principalType, String catName, String dbName, String tableName,
       List<String> partValues, String partName) {
     return objectStore.listPrincipalPartitionGrants(principalName, principalType,
-        dbName, tableName, partValues, partName);
+        catName, dbName, tableName, partValues, partName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
-      PrincipalType principalType, String dbName,
+      PrincipalType principalType, String catName, String dbName,
       String tableName, String columnName) {
     return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
-        dbName, tableName, columnName);
+        catName, dbName, tableName, columnName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
-      String principalName, PrincipalType principalType, String dbName, String tableName,
+      String principalName, PrincipalType principalType, String catName, String dbName, String tableName,
       List<String> partVals, String partName, String columnName) {
     return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
-        dbName, tableName, partVals, partName, columnName);
+        catName, dbName, tableName, partVals, partName, columnName);
   }
 
   @Override
@@ -535,34 +567,34 @@ public Role getRole(String roleName) throws NoSuchObjectException {
   }
 
   @Override
-  public Partition getPartitionWithAuth(String dbName, String tblName,
+  public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
       List<String> partVals, String userName, List<String> groupNames)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName,
+    return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
         groupNames);
   }
 
   @Override
-  public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+  public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
       short maxParts, String userName, List<String> groupNames)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return objectStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName,
+    return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
        groupNames);
   }
 
   @Override
-  public List<String> listPartitionNamesPs(String dbName, String tblName,
+  public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
       List<String> partVals, short maxParts)
       throws MetaException, NoSuchObjectException {
-    return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
+    return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
   }
 
   @Override
-  public List<Partition> listPartitionsPsWithAuth(String dbName, String tblName,
+  public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
       List<String> partVals, short maxParts, String userName, List<String> groupNames)
       throws MetaException, InvalidObjectException, NoSuchObjectException {
-    return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts,
+    return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
         userName, groupNames);
   }
 
@@ -607,53 +639,52 @@ public long cleanupEvents() {
   }
 
   @Override
-  public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
-    return objectStore.listDBGrantsAll(dbName);
+  public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+    return objectStore.listDBGrantsAll(catName, dbName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName,
       String partitionName, String columnName) {
-    return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
+    return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
-    return objectStore.listTableGrantsAll(dbName, tableName);
+  public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+    return objectStore.listTableGrantsAll(catName, dbName, tableName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName,
       String partitionName) {
-    return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName);
+    return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName,
       String columnName) {
-    return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName);
+    return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
   }
 
   @Override
-  public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+  public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
       List<String> colNames) throws MetaException, NoSuchObjectException {
-    return objectStore.getTableColumnStatistics(dbName, tableName, colNames);
+    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
   }
 
   @Override
-  public boolean deleteTableColumnStatistics(String dbName, String tableName,
+  public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
       String colName)
-      throws NoSuchObjectException, MetaException, InvalidObjectException,
-      InvalidInputException {
-    return objectStore.deleteTableColumnStatistics(dbName, tableName, colName);
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName);
   }
 
   @Override
-  public boolean deletePartitionColumnStatistics(String dbName, String tableName,
+  public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
       String partName, List<String> partVals, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName,
+    return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName,
         partVals, colName);
   }
 
@@ -689,7 +720,7 @@ public String getToken(String tokenIdentifier) {
 
   @Override
   public List<String> getAllTokenIdentifiers() {
-    return new ArrayList();
+    return new ArrayList<>();
   }
 
   @Override
@@ -727,35 +758,35 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
   }
 
   @Override
-  public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+  public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
       String tblName, List<String> colNames, List<String> partNames)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames);
+    return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames);
   }
 
   @Override
-  public boolean doesPartitionExist(String dbName, String tableName,
+  public boolean doesPartitionExist(String catName, String dbName, String tableName,
       List<String> partVals) throws MetaException, NoSuchObjectException {
-    return objectStore.doesPartitionExist(dbName, tableName, partVals);
+    return objectStore.doesPartitionExist(catName, dbName, tableName, partVals);
   }
 
   @Override
-  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+  public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
       throws InvalidObjectException, MetaException {
-    return objectStore.addPartitions(dbName, tblName, parts);
+    return objectStore.addPartitions(catName, dbName, tblName, parts);
   }
 
   @Override
-  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec,
+  public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec,
       boolean ifNotExists) throws InvalidObjectException, MetaException {
     return false;
   }
 
   @Override
-  public void dropPartitions(String dbName, String tblName, List<String> partNames)
+  public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException {
-    objectStore.dropPartitions(dbName, tblName, partNames);
+    objectStore.dropPartitions(catName, dbName, tblName, partNames);
   }
 
   @Override
@@ -769,42 +800,42 @@ public void createFunction(Function func) throws InvalidObjectException,
   }
 
   @Override
-  public void alterFunction(String dbName, String funcName, Function newFunction)
+  public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
       throws InvalidObjectException, MetaException {
-    objectStore.alterFunction(dbName, funcName, newFunction);
+    objectStore.alterFunction(catName, dbName, funcName, newFunction);
   }
 
   @Override
-  public void dropFunction(String dbName, String funcName)
+  public void dropFunction(String catName, String dbName, String funcName)
       throws MetaException, NoSuchObjectException,
       InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      objectStore.dropFunction(dbName, funcName);
+      objectStore.dropFunction(catName, dbName, funcName);
     } else {
       throw new RuntimeException("Event failed.");
    }
  }
 
   @Override
-  public Function getFunction(String dbName, String funcName)
+  public Function getFunction(String catName, String dbName, String funcName)
       throws MetaException {
-    return objectStore.getFunction(dbName, funcName);
+    return objectStore.getFunction(catName, dbName, funcName);
   }
 
   @Override
-  public List<Function> getAllFunctions()
+  public List<Function> getAllFunctions(String catName)
       throws MetaException {
     return Collections.emptyList();
   }
 
   @Override
-  public List<String> getFunctions(String dbName, String pattern)
+  public List<String> getFunctions(String catName, String dbName, String pattern)
       throws MetaException {
-    return objectStore.getFunctions(dbName, pattern);
+    return objectStore.getFunctions(catName, dbName, pattern);
   }
 
   @Override
-  public AggrStats get_aggr_stats_for(String dbName,
+  public AggrStats get_aggr_stats_for(String catName, String dbName,
       String tblName, List<String> partNames, List<String> colNames)
       throws MetaException {
     return null;
@@ -882,38 +913,38 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
   }
 
   @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+  public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
       String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
+  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
+  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLCheckConstraint> getCheckConstraints(String db_name, String tbl_name)
+  public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
@@ -930,8 +961,9 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
   }
 
   @Override
-  public void dropConstraint(String dbName, String tableName,
-      String constraintName) throws NoSuchObjectException {
+  public void dropConstraint(String catName, String dbName, String tableName,
+      String constraintName, boolean missingOk)
+      throws NoSuchObjectException {
   }
 
   @Override
@@ -982,7 +1014,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int
   }
 
   @Override
-  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException {
     return objectStore.getResourcePlan(name);
   }
 
@@ -1083,6 +1115,13 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa
     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
   }
 
+  @Override
+  public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+      throws MetaException, NoSuchObjectException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
       NoSuchObjectException {
     objectStore.createISchema(schema);
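DummyRawStoreFailEvent forwards every call to a real ObjectStore and, for event-producing operations, throws when a test flips its failure flag. A self-contained sketch of that delegate-or-throw pattern, with a small hypothetical Store interface standing in for the much larger RawStore:

    // Hypothetical stand-in for RawStore; only the pattern matters here.
    interface Store {
      void createCatalog(String name);
    }

    class FailingStore implements Store {
      private final Store delegate;
      private boolean shouldSucceed = true;  // tests flip this to simulate failures

      FailingStore(Store delegate) { this.delegate = delegate; }

      void setShouldSucceed(boolean shouldSucceed) { this.shouldSucceed = shouldSucceed; }

      @Override
      public void createCatalog(String name) {
        if (shouldSucceed) {
          delegate.createCatalog(name);                // normal path: delegate
        } else {
          throw new RuntimeException("Failed event");  // injected failure
        }
      }
    }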
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
index 62bd94ab8e..505b3c0f52 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
@@ -85,7 +85,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
           .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -102,7 +102,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
           .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -119,7 +119,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
           .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -139,7 +139,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableParams(params)
           .setCols(type.getFields())
           .setBucketCols(bucketCols)
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -158,7 +158,7 @@ public void testTransactionalValidation() throws Throwable {
         .setBucketCols(bucketCols)
         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
-        .build();
+        .build(conf);
     client.createTable(t);
     assertTrue("CREATE TABLE should succeed",
         "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
@@ -188,7 +188,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
          .setCols(type.getFields())
          .setInputFormat("org.apache.hadoop.mapred.FileInputFormat")
-          .build();
+          .build(conf);
       client.createTable(t);
       params.put("transactional", "true");
       t.setParameters(params);
@@ -210,7 +210,7 @@ public void testTransactionalValidation() throws Throwable {
         .setBucketCols(bucketCols)
         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
-        .build();
+        .build(conf);
     client.createTable(t);
     params.put("transactional", "true");
     t.setParameters(params);
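TableBuilder.build() now takes a Configuration so the builder can fill in the default catalog when none is set explicitly. A hedged usage sketch following the pattern in these tests; the table name is a placeholder, and conf, cols, and client are assumed to be set up as in the test fixture:

    // Sketch only: mirrors the .build(conf) change above.
    Table t = new TableBuilder()
        .setDbName("default")
        .setTableName("some_table")   // hypothetical name
        .setCols(cols)                // a previously built List<FieldSchema>
        .build(conf);                 // conf now supplies the default catalog
    client.createTable(t);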
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 0940938430..7f1b662f49 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -82,6 +82,7 @@
 import java.util.Arrays;
 import java.util.List;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
@@ -3075,11 +3076,11 @@ public void testConstraints() throws IOException {
     try {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl1"));
       assertEquals(pks.size(), 2);
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl3"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl3"));
       assertEquals(uks.size(), 1);
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl2"));
       assertEquals(fks.size(), 2);
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl3"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl3"));
       assertEquals(nns.size(), 1);
     } catch (TException te) {
       assertNull(te);
@@ -3104,13 +3105,13 @@ public void testConstraints() throws IOException {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4"));
       assertEquals(pks.size(), 2);
       pkName = pks.get(0).getPk_name();
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertEquals(uks.size(), 1);
       ukName = uks.get(0).getUk_name();
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5"));
       assertEquals(fks.size(), 2);
       fkName = fks.get(0).getFk_name();
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertEquals(nns.size(), 1);
       nnName = nns.get(0).getNn_name();
 
@@ -3133,11 +3134,11 @@ public void testConstraints() throws IOException {
     try {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4"));
       assertTrue(pks.isEmpty());
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl4"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl4"));
       assertTrue(uks.isEmpty());
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5"));
       assertTrue(fks.isEmpty());
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertTrue(nns.isEmpty());
     } catch (TException te) {
       assertNull(te);
diff --git metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql
new file mode 100644
index 0000000000..776ef1471a
--- /dev/null
+++ metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql
@@ -0,0 +1,54 @@
+
+CREATE TABLE "APP"."CTLGS" (
+    "CTLG_ID" BIGINT NOT NULL,
+    "NAME" VARCHAR(256) UNIQUE,
+    "DESC" VARCHAR(4000),
+    "LOCATION_URI" VARCHAR(4000) NOT NULL);
+
+ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLGS_PK" PRIMARY KEY ("CTLG_ID");
+
+-- Insert a default value.  The location is TBD.  Hive will fix this when it starts
+INSERT INTO "APP"."CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+DROP INDEX "APP"."UNIQUE_DATABASE";
+
+-- Add the new column to the DBS table, can't put in the not null constraint yet
+ALTER TABLE "APP"."DBS" ADD COLUMN "CTLG_NAME" VARCHAR(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE "APP"."DBS"
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."DBS" ALTER COLUMN "CTLG_NAME" NOT NULL;
+
+-- Put back the unique index
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+
+-- Add the foreign key
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- Add columns to table stats and part stats
+ALTER TABLE "APP"."TAB_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Set the existing column names to Hive
+UPDATE "APP"."TAB_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+UPDATE "APP"."PART_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+
+-- Rebuild the index for Part col stats.  No such index for table stats, which seems weird
+DROP INDEX "APP"."PCS_STATS_IDX";
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+-- Add column to partition events
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Add column to notification log
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD COLUMN "CAT_NAME" VARCHAR(256);
diff --git metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index 1e4dd99f1c..1a3c00a489 100644
--- metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -10,5 +10,6 @@ RUN '049-HIVE-18489.derby.sql';
 RUN '050-HIVE-18192.derby.sql';
 RUN '051-HIVE-18675.derby.sql';
 RUN '052-HIVE-18965.derby.sql';
+RUN '053-HIVE-18755.derby.sql';
 
 UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
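The upgrade script uses the usual add-column, backfill, then NOT NULL sequence so it works on already-populated metastores. A hypothetical post-upgrade sanity check, not part of the commit, confirming every database now resolves to the built-in 'hive' catalog:

    -- Hypothetical verification query; assumes the schema created above.
    SELECT d."NAME" AS "DB", c."NAME" AS "CTLG", c."LOCATION_URI"
    FROM "APP"."DBS" d
    JOIN "APP"."CTLGS" c ON d."CTLG_NAME" = c."NAME";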
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 314a1868c0..e949d6bd95 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 
 import java.io.BufferedWriter;
@@ -5025,8 +5026,8 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
       // We set the signature for the view if it is a materialized view
       if (tbl.isMaterializedView()) {
         CreationMetadata cm =
-            new CreationMetadata(tbl.getDbName(), tbl.getTableName(),
-                ImmutableSet.copyOf(crtView.getTablesUsed()));
+            new CreationMetadata(MetaStoreUtils.getDefaultCatalog(conf), tbl.getDbName(),
+                tbl.getTableName(), ImmutableSet.copyOf(crtView.getTablesUsed()));
         cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
         tbl.getTTable().setCreationMetadata(cm);
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
index de120afbbc..50fc4e0c63 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
@@ -21,6 +21,7 @@
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -31,6 +32,8 @@
 
 import java.io.Serializable;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
 /**
  * This task does some work related to materialized views. In particular, it adds
  * or removes the materialized view from the registry if needed, or registers new
@@ -63,7 +66,8 @@ public int execute(DriverContext driverContext) {
       Hive db = Hive.get(conf);
       Table mvTable = db.getTable(getWork().getViewName());
       CreationMetadata cm =
-          new CreationMetadata(mvTable.getDbName(), mvTable.getTableName(),
+          new CreationMetadata(MetaStoreUtils.getDefaultCatalog(conf), mvTable.getDbName(),
+              mvTable.getTableName(),
               ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
       cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
       db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);
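CreationMetadata now takes the catalog as its first constructor argument, resolved through MetaStoreUtils.getDefaultCatalog(conf) since ql has no notion of a current catalog yet. A minimal sketch of the new construction order; dbName, viewName, and tablesUsed are assumed to be in scope:

    // Sketch of the updated constructor call; the catalog comes first.
    CreationMetadata cm = new CreationMetadata(
        MetaStoreUtils.getDefaultCatalog(conf),  // resolved default catalog
        dbName, viewName, ImmutableSet.copyOf(tablesUsed));
    cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));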
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 5ad4406cef..149448b75d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -25,7 +25,9 @@
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
+
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
 import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
 
@@ -4483,7 +4485,7 @@ public void dropConstraint(String dbName, String tableName, String constraintNam
   public List<SQLUniqueConstraint> getUniqueConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getUniqueConstraints(new UniqueConstraintsRequest(dbName, tblName));
+      return getMSC().getUniqueConstraints(new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4493,7 +4495,7 @@ public void dropConstraint(String dbName, String tableName, String constraintNam
   public List<SQLNotNullConstraint> getNotNullConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getNotNullConstraints(new NotNullConstraintsRequest(dbName, tblName));
+      return getMSC().getNotNullConstraints(new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4503,7 +4505,7 @@ public void dropConstraint(String dbName, String tableName, String constraintNam
   public List<SQLDefaultConstraint> getDefaultConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getDefaultConstraints(new DefaultConstraintsRequest(dbName, tblName));
+      return getMSC().getDefaultConstraints(new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4513,7 +4515,8 @@ public void dropConstraint(String dbName, String tableName, String constraintNam
   public List<SQLCheckConstraint> getCheckConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getCheckConstraints(new CheckConstraintsRequest(dbName, tblName));
+      return getMSC().getCheckConstraints(new CheckConstraintsRequest(getDefaultCatalog(conf),
+          dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4627,7 +4630,7 @@ private UniqueConstraint getUniqueConstraints(String dbName, String tblName, boo
     try {
       List<SQLUniqueConstraint> uniqueConstraints = getMSC().getUniqueConstraints(
-          new UniqueConstraintsRequest(dbName, tblName));
+          new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (onlyReliable && uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
         uniqueConstraints = uniqueConstraints.stream()
           .filter(uk -> uk.isRely_cstr())
@@ -4675,7 +4678,7 @@ public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblN
     try {
       List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
-          new NotNullConstraintsRequest(dbName, tblName));
+          new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
         notNullConstraints = notNullConstraints.stream()
           .filter(nnc -> nnc.isEnable_cstr())
@@ -4699,7 +4702,7 @@ public CheckConstraint getEnabledCheckConstraints(String dbName, String tblName)
     try {
       List<SQLCheckConstraint> checkConstraints = getMSC().getCheckConstraints(
-          new CheckConstraintsRequest(dbName, tblName));
+          new CheckConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (checkConstraints != null && !checkConstraints.isEmpty()) {
         checkConstraints = checkConstraints.stream()
           .filter(nnc -> nnc.isEnable_cstr())
@@ -4722,7 +4725,7 @@ public DefaultConstraint getEnabledDefaultConstraints(String dbName, String tblN
     try {
       List<SQLDefaultConstraint> defaultConstraints = getMSC().getDefaultConstraints(
-          new DefaultConstraintsRequest(dbName, tblName));
+          new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (defaultConstraints != null && !defaultConstraints.isEmpty()) {
         defaultConstraints = defaultConstraints.stream()
           .filter(nnc -> nnc.isEnable_cstr())
@@ -4738,7 +4741,7 @@ private NotNullConstraint getNotNullConstraints(String dbName, String tblName, b
     try {
       List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
-          new NotNullConstraintsRequest(dbName, tblName));
+          new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (onlyReliable && notNullConstraints != null && !notNullConstraints.isEmpty()) {
         notNullConstraints = notNullConstraints.stream()
           .filter(nnc -> nnc.isRely_cstr())
@@ -4754,7 +4757,7 @@ public DefaultConstraint getDefaultConstraints(String dbName, String tblName)
     try {
       List<SQLDefaultConstraint> defaultConstraints = getMSC().getDefaultConstraints(
-          new DefaultConstraintsRequest(dbName, tblName));
+          new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (defaultConstraints != null && !defaultConstraints.isEmpty()) {
         defaultConstraints = defaultConstraints.stream()
           .collect(Collectors.toList());
@@ -4769,7 +4772,7 @@ public CheckConstraint getCheckConstraints(String dbName, String tblName)
     try {
       List<SQLCheckConstraint> checkConstraints = getMSC().getCheckConstraints(
-          new CheckConstraintsRequest(dbName, tblName));
+          new CheckConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (checkConstraints != null && !checkConstraints.isEmpty()) {
         checkConstraints = checkConstraints.stream()
           .collect(Collectors.toList());
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index d79b6ed059..c3d0e4023c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -88,6 +88,10 @@ private Warehouse getWh() throws MetaException {
     return wh;
   }
 
+  // TODO CAT - a number of these need to be updated.  Don't bother with deprecated methods as
+  // this is just an internal class.  Wait until we're ready to move all the catalog stuff up
+  // into ql.
+
   @Override
   protected void create_table_with_environment_context(
       org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext)
@@ -103,10 +107,13 @@ protected void create_table_with_environment_context(
   }
 
   @Override
-  protected void drop_table_with_environment_context(String dbname, String name,
+  protected void drop_table_with_environment_context(String catName, String dbname, String name,
       boolean deleteData, EnvironmentContext envContext) throws MetaException, TException,
       NoSuchObjectException, UnsupportedOperationException {
     // First try temp table
+    // TODO CAT - I think the right thing here is to always put temp tables in the current
+    // catalog.  But we don't yet have a notion of current catalog, so we'll have to hold on
+    // until we do.
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
     if (table != null) {
       try {
@@ -120,7 +127,7 @@ protected void drop_table_with_environment_context(String dbname, String name,
     }
 
     // Try underlying client
-    super.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+    super.drop_table_with_environment_context(catName, dbname, name, deleteData, envContext);
   }
 
   @Override
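Every constraint request object in Hive.java is now pinned to the default catalog via the static getDefaultCatalog(conf) import, keeping ql's behavior unchanged until it grows real catalog awareness. A short sketch of the idiom; conf, dbName, and tblName are assumed in scope, with a metastore client from getMSC():

    // Sketch of the request-construction idiom used throughout Hive.java.
    UniqueConstraintsRequest req =
        new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName);
    List<SQLUniqueConstraint> uks = getMSC().getUniqueConstraints(req);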
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 5eefc1539b..7470c8ae78 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -108,6 +108,9 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;

+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+
 /**
  * BaseSemanticAnalyzer.
  *
@@ -646,16 +649,16 @@ private static String spliceString(String str, int i, int length, String replace
   }

   protected List<FieldSchema> getColumns(ASTNode ast) throws SemanticException {
-    return getColumns(ast, true);
+    return getColumns(ast, true, conf);
   }

   /**
    * Get the list of FieldSchema out of the ASTNode.
    */
-  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase) throws SemanticException {
-    return getColumns(ast, lowerCase, null,new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(),
-        new ArrayList<SQLUniqueConstraint>(), new ArrayList<SQLNotNullConstraint>(),
-        new ArrayList<SQLDefaultConstraint>(), new ArrayList<SQLCheckConstraint>());
+  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase, Configuration conf)
+      throws SemanticException {
+    return getColumns(ast, lowerCase, null, new ArrayList<>(), new ArrayList<>(), new ArrayList<>(),
+        new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), conf);
   }

   private static class ConstraintInfo {
@@ -717,79 +720,83 @@ private static void constraintInfosToPrimaryKeys(String databaseName, String tab
   /**
    * Process the unique constraints from the ast node and populate the SQLUniqueConstraint list.
    */
-  protected static void processUniqueConstraints(String databaseName, String tableName,
+  protected static void processUniqueConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<SQLUniqueConstraint> uniqueConstraints)
           throws SemanticException {
     List<ConstraintInfo> uniqueInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, uniqueInfos);
-    constraintInfosToUniqueConstraints(databaseName, tableName, uniqueInfos, uniqueConstraints);
+    constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints);
   }

-  protected static void processUniqueConstraints(String databaseName, String tableName,
+  protected static void processUniqueConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<String> columnNames, List<SQLUniqueConstraint> uniqueConstraints)
           throws SemanticException {
     List<ConstraintInfo> uniqueInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, uniqueInfos, null, null);
-    constraintInfosToUniqueConstraints(databaseName, tableName, uniqueInfos, uniqueConstraints);
+    constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints);
   }

-  private static void constraintInfosToUniqueConstraints(String databaseName, String tableName,
+  private static void constraintInfosToUniqueConstraints(String catName, String databaseName, String tableName,
      List<ConstraintInfo> uniqueInfos, List<SQLUniqueConstraint> uniqueConstraints) {
     int i = 1;
     for (ConstraintInfo uniqueInfo : uniqueInfos) {
-      uniqueConstraints.add(new SQLUniqueConstraint(databaseName, tableName, uniqueInfo.colName,
+      uniqueConstraints.add(new SQLUniqueConstraint(catName, databaseName, tableName, uniqueInfo.colName,
           i++, uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely));
     }
   }
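From a caller's point of view, the 2-arg getColumns overload is gone and every call site must now supply a Configuration so the constraints built during column parsing can be stamped with a catalog. A sketch of the new public entry point (GetColumnsCaller is a hypothetical caller; the 3-arg signature is the one introduced above):

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.parse.ASTNode;
    import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class GetColumnsCaller {
      static List<FieldSchema> columnsOf(ASTNode node, Configuration conf)
          throws SemanticException {
        // lowerCase = true matches the old getColumns(ast, true) behavior
        return BaseSemanticAnalyzer.getColumns(node, true, conf);
      }
    }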
-  protected static void processCheckConstraints(String databaseName, String tableName,
+  protected static void processCheckConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<String> columnNames, List<SQLCheckConstraint> checkConstraints,
       final ASTNode typeChild, final TokenRewriteStream tokenRewriteStream)
           throws SemanticException {
     List<ConstraintInfo> checkInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, checkInfos, typeChild, tokenRewriteStream);
-    constraintInfosToCheckConstraints(databaseName, tableName, checkInfos, checkConstraints);
+    constraintInfosToCheckConstraints(catName, databaseName, tableName, checkInfos, checkConstraints);
   }

-  private static void constraintInfosToCheckConstraints(String databaseName, String tableName,
+  private static void constraintInfosToCheckConstraints(String catName, String databaseName, String tableName,
       List<ConstraintInfo> checkInfos, List<SQLCheckConstraint> checkConstraints) {
     for (ConstraintInfo checkInfo : checkInfos) {
-      checkConstraints.add(new SQLCheckConstraint(databaseName, tableName, checkInfo.colName,
+      checkConstraints.add(new SQLCheckConstraint(catName, databaseName, tableName, checkInfo.colName,
           checkInfo.defaultValue, checkInfo.constraintName, checkInfo.enable, checkInfo.validate,
           checkInfo.rely));
     }
   }

-  protected static void processDefaultConstraints(String databaseName, String tableName,
+
+  protected static void processDefaultConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<String> columnNames, List<SQLDefaultConstraint> defaultConstraints,
       final ASTNode typeChild) throws SemanticException {
     List<ConstraintInfo> defaultInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, defaultInfos, typeChild, null);
-    constraintInfosToDefaultConstraints(databaseName, tableName, defaultInfos, defaultConstraints);
+    constraintInfosToDefaultConstraints(catName, databaseName, tableName, defaultInfos, defaultConstraints);
   }

-  private static void constraintInfosToDefaultConstraints(String databaseName, String tableName,
+  private static void constraintInfosToDefaultConstraints(
+      String catName, String databaseName, String tableName,
      List<ConstraintInfo> defaultInfos, List<SQLDefaultConstraint> defaultConstraints) {
     for (ConstraintInfo defaultInfo : defaultInfos) {
-      defaultConstraints.add(new SQLDefaultConstraint(databaseName, tableName, defaultInfo.colName,
-          defaultInfo.defaultValue, defaultInfo.constraintName, defaultInfo.enable,
-          defaultInfo.validate, defaultInfo.rely));
+      defaultConstraints.add(new SQLDefaultConstraint(catName, databaseName, tableName,
+          defaultInfo.colName, defaultInfo.defaultValue, defaultInfo.constraintName,
+          defaultInfo.enable, defaultInfo.validate, defaultInfo.rely));
     }
   }

-  protected static void processNotNullConstraints(String databaseName, String tableName,
+  protected static void processNotNullConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<String> columnNames, List<SQLNotNullConstraint> notNullConstraints)
           throws SemanticException {
     List<ConstraintInfo> notNullInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, notNullInfos, null, null);
-    constraintInfosToNotNullConstraints(databaseName, tableName, notNullInfos, notNullConstraints);
+    constraintInfosToNotNullConstraints(catName, databaseName, tableName, notNullInfos, notNullConstraints);
   }

-  private static void constraintInfosToNotNullConstraints(String databaseName, String tableName,
-      List<ConstraintInfo> notNullInfos, List<SQLNotNullConstraint> notNullConstraints) {
+  private static void constraintInfosToNotNullConstraints(
+      String catName, String databaseName, String tableName, List<ConstraintInfo> notNullInfos,
+      List<SQLNotNullConstraint> notNullConstraints) {
     for (ConstraintInfo notNullInfo : notNullInfos) {
-      notNullConstraints.add(new SQLNotNullConstraint(databaseName, tableName, notNullInfo.colName,
-          notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, notNullInfo.rely));
+      notNullConstraints.add(new SQLNotNullConstraint(catName, databaseName, tableName,
+          notNullInfo.colName, notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate,
+          notNullInfo.rely));
     }
   }
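All of the converter loops above now pass the catalog as the first constructor argument of the thrift-generated constraint structs. For reference, the field order used in the diff, isolated into a tiny sketch (the enable/validate/rely values here are arbitrary examples):

    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;

    public class ConstraintStructSketch {
      // Constructor order exactly as used above:
      // (catName, db, table, column, constraintName, enable, validate, rely)
      static SQLNotNullConstraint notNull(String catName, String db, String tbl,
          String col, String name) {
        return new SQLNotNullConstraint(catName, db, tbl, col, name,
            true /* enable */, false /* validate */, true /* rely */);
      }
    }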
@@ -1176,13 +1183,12 @@ private static void checkColumnName(String columnName) throws SemanticException
    * Get the list of FieldSchema out of the ASTNode.
    * Additionally, populate the primaryKeys and foreignKeys if any.
    */
-  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase,
-      TokenRewriteStream tokenRewriteStream,
-      List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
-      List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
-      List<SQLDefaultConstraint> defaultConstraints,
-      List<SQLCheckConstraint> checkConstraints)
-          throws SemanticException {
+  public static List<FieldSchema> getColumns(
+      ASTNode ast, boolean lowerCase, TokenRewriteStream tokenRewriteStream,
+      List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+      List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+      List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints,
+      Configuration conf) throws SemanticException {
     List<FieldSchema> colList = new ArrayList<FieldSchema>();

     Tree parent = ast.getParent();
@@ -1192,7 +1198,11 @@ private static void checkColumnName(String columnName) throws SemanticException
       switch (child.getToken().getType()) {
         case HiveParser.TOK_UNIQUE: {
           String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
-          processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1], child, uniqueConstraints);
+          // TODO CAT - for now always use the default catalog. Eventually will want to see if
+          // the user specified a catalog
+          String catName = MetaStoreUtils.getDefaultCatalog(conf);
+          processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child,
+              uniqueConstraints);
         }
         break;
         case HiveParser.TOK_PRIMARY_KEY: {
@@ -1237,23 +1247,26 @@ private static void checkColumnName(String columnName) throws SemanticException
       }
       if (constraintChild != null) {
         String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
+        // TODO CAT - for now always use the default catalog. Eventually will want to see if
+        // the user specified a catalog
+        String catName = MetaStoreUtils.getDefaultCatalog(conf);
         // Process column constraint
         switch (constraintChild.getToken().getType()) {
           case HiveParser.TOK_CHECK_CONSTRAINT:
-            processCheckConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
+            processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild,
                 ImmutableList.of(col.getName()), checkConstraints, typeChild, tokenRewriteStream);
             break;
           case HiveParser.TOK_DEFAULT_VALUE:
-            processDefaultConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
+            processDefaultConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild,
                 ImmutableList.of(col.getName()), defaultConstraints, typeChild);
             break;
           case HiveParser.TOK_NOT_NULL:
-            processNotNullConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
+            processNotNullConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild,
                 ImmutableList.of(col.getName()), notNullConstraints);
             break;
           case HiveParser.TOK_UNIQUE:
-            processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild,
+            processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild,
                 ImmutableList.of(col.getName()), uniqueConstraints);
             break;
           case HiveParser.TOK_PRIMARY_KEY:
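The TODO CAT comments above all describe the same interim policy: there is no catalog syntax in the grammar yet, so the analyzer resolves the default catalog from the conf once per statement and threads it everywhere. The helper this patch calls, MetaStoreUtils.getDefaultCatalog(conf), lives outside this diff; a stand-in sketch of what such a resolver plausibly looks like (both the config key and the "hive" fallback, i.e. Warehouse.DEFAULT_CATALOG_NAME, are assumptions for illustration, not read from this patch):

    import org.apache.hadoop.conf.Configuration;

    public class DefaultCatalogSketch {
      // Assumed key and fallback; treat as illustrative, not authoritative.
      static String getDefaultCatalog(Configuration conf) {
        return conf == null ? "hive" : conf.get("metastore.catalog.default", "hive");
      }
    }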
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 5b9ab3aa7f..adae018718 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -285,6 +285,9 @@ public void analyzeInternal(ASTNode input) throws SemanticException {
     case HiveParser.TOK_ALTERTABLE: {
       ast = (ASTNode) input.getChild(1);
       String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0));
+      // TODO CAT - for now always use the default catalog. Eventually will want to see if
+      // the user specified a catalog
+      String catName = MetaStoreUtils.getDefaultCatalog(conf);
       String tableName = getDotName(qualified);
       HashMap<String, String> partSpec = null;
       ASTNode partSpecNode = (ASTNode)input.getChild(2);
@@ -312,7 +315,7 @@ public void analyzeInternal(ASTNode input) throws SemanticException {
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) {
         analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) {
-        analyzeAlterTableRenameCol(qualified, ast, partSpec);
+        analyzeAlterTableRenameCol(catName, qualified, ast, partSpec);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) {
         analyzeAlterTableAddParts(qualified, ast, false);
       } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) {
@@ -2149,6 +2152,9 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName)
       throws SemanticException {
     ASTNode parent = (ASTNode) ast.getParent();
     String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
+    // TODO CAT - for now always use the default catalog. Eventually will want to see if
+    // the user specified a catalog
+    String catName = MetaStoreUtils.getDefaultCatalog(conf);
     ASTNode child = (ASTNode) ast.getChild(0);
     List<SQLPrimaryKey> primaryKeys = new ArrayList<>();
     List<SQLForeignKey> foreignKeys = new ArrayList<>();
@@ -2156,7 +2162,7 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName)
     switch (child.getToken().getType()) {
       case HiveParser.TOK_UNIQUE:
-        BaseSemanticAnalyzer.processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1],
+        BaseSemanticAnalyzer.processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1],
             child, uniqueConstraints);
         break;
       case HiveParser.TOK_PRIMARY_KEY:
@@ -3075,7 +3081,7 @@ private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expec
         alterTblDesc)));
   }

-  private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
+  private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTNode ast,
       HashMap<String, String> partSpec) throws SemanticException {
     String newComment = null;
     boolean first = false;
@@ -3119,23 +3125,23 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast,
       switch (constraintChild.getToken().getType()) {
         case HiveParser.TOK_CHECK_CONSTRAINT:
           checkConstraints = new ArrayList<>();
-          processCheckConstraints(qualified[0], qualified[1], constraintChild,
+          processCheckConstraints(catName, qualified[0], qualified[1], constraintChild,
               ImmutableList.of(newColName), checkConstraints, (ASTNode)ast.getChild(2),
               this.ctx.getTokenRewriteStream());
           break;
         case HiveParser.TOK_DEFAULT_VALUE:
           defaultConstraints = new ArrayList<>();
-          processDefaultConstraints(qualified[0], qualified[1], constraintChild,
+          processDefaultConstraints(catName, qualified[0], qualified[1], constraintChild,
               ImmutableList.of(newColName), defaultConstraints, (ASTNode)ast.getChild(2));
           break;
         case HiveParser.TOK_NOT_NULL:
           notNullConstraints = new ArrayList<>();
-          processNotNullConstraints(qualified[0], qualified[1], constraintChild,
+          processNotNullConstraints(catName, qualified[0], qualified[1], constraintChild,
               ImmutableList.of(newColName), notNullConstraints);
           break;
         case HiveParser.TOK_UNIQUE:
           uniqueConstraints = new ArrayList<>();
-          processUniqueConstraints(qualified[0], qualified[1], constraintChild,
+          processUniqueConstraints(catName, qualified[0], qualified[1], constraintChild,
              ImmutableList.of(newColName), uniqueConstraints);
           break;
         case HiveParser.TOK_PRIMARY_KEY:
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
index 762e438f91..88b6068941 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java
@@ -86,7 +86,7 @@ private void analyzeCreateMacro(ASTNode ast) throws SemanticException {
     }

     List<FieldSchema> arguments =
-        BaseSemanticAnalyzer.getColumns((ASTNode)ast.getChild(1), true);
+        BaseSemanticAnalyzer.getColumns((ASTNode)ast.getChild(1), true, conf);
     boolean isNoArgumentMacro = arguments.size() == 0;
     RowResolver rowResolver = new RowResolver();
     ArrayList<String> macroColNames = new ArrayList<String>(arguments.size());
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 26f20f2e05..53f3269a7a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -12723,14 +12723,14 @@ ASTNode analyzeCreateTable(
         break;
       case HiveParser.TOK_TABCOLLIST:
         cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys,
-            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
         break;
       case HiveParser.TOK_TABLECOMMENT:
         comment = unescapeSQLString(child.getChild(0).getText());
         break;
       case HiveParser.TOK_TABLEPARTCOLS:
-        partCols = getColumns(child, false, ctx.getTokenRewriteStream(),primaryKeys, foreignKeys,
-            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+        partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys,
+            uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints, conf);
         if(hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
           //TODO: these constraints should be supported for partition columns
           throw new SemanticException(
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java
index 2c7064bd47..8a7c06d40e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java
@@ -21,6 +21,8 @@ import java.util.List;

 import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -89,7 +91,7 @@ public Database getDatabase(String dbName) throws HiveException {
       return Hive.getWithFastCheck(conf).getDatabase(dbName);
     } else {
       try {
-        return handler.get_database_core(dbName);
+        return handler.get_database_core(MetaStoreUtils.getDefaultCatalog(conf), dbName);
       } catch (NoSuchObjectException e) {
         throw new HiveException(e);
       } catch (MetaException e) {
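The next hunk widens the metastore filter-hook contract: table-name filtering now receives the catalog too. The patch accepts the parameter without using it, since the privilege objects are still built from (db, table) alone. A sketch of an implementation honoring the new shape (a free-standing class mirroring the method; the real interface is org.apache.hadoop.hive.metastore.MetaStoreFilterHook, and wiring to it is not shown here):

    import java.util.List;

    public class CatalogAwareFilterSketch {
      public List<String> filterTableNames(String catName, String dbName,
          List<String> tableList) {
        // catName is accepted but deliberately unused, matching the patch:
        // authorization objects are not yet catalog-aware.
        return tableList;
      }
    }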
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
index 233a48cc6c..ca4b667a76 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
@@ -43,7 +43,8 @@ public AuthorizationMetaStoreFilterHook(Configuration conf) {
   }

   @Override
-  public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
+  public List<String> filterTableNames(String catName, String dbName, List<String> tableList)
+      throws MetaException {
     List<HivePrivilegeObject> listObjs = getHivePrivObjects(dbName, tableList);
     return getTableNames(getFilteredObjects(listObjs));
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index ade7726336..dd0929f2b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -46,6 +46,8 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;

+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+
 /**
  * Superclass for all threads in the compactor.
  */
@@ -102,7 +104,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException
    */
   protected Table resolveTable(CompactionInfo ci) throws MetaException {
     try {
-      return rs.getTable(ci.dbname, ci.tableName);
+      return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
     } catch (MetaException e) {
       LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage());
       throw e;
@@ -120,7 +122,7 @@ protected Partition resolvePartition(CompactionInfo ci) throws Exception {
     if (ci.partName != null) {
       List<Partition> parts;
       try {
-        parts = rs.getPartitionsByNames(ci.dbname, ci.tableName,
+        parts = rs.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName,
             Collections.singletonList(ci.partName));
         if (parts == null || parts.size() == 0) {
           // The partition got dropped before we went looking for it.
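The compactor hunks show the same interim policy applied on the server side: compaction queue entries carry only (dbname, tableName), so lookups are pinned to the default catalog resolved from the thread's own conf. A minimal sketch of that resolution pattern (Store is a hypothetical stand-in for the RawStore-style interface used above):

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;

    public class CompactorResolveSketch {
      interface Store {
        Object getTable(String catName, String dbName, String tableName);
      }

      static Object resolve(Store rs, Configuration conf, String db, String tbl) {
        // Queue entries are not catalog-qualified, so the default catalog is
        // re-derived from the conf on every lookup, as in resolveTable above.
        return rs.getTable(getDefaultCatalog(conf), db, tbl);
      }
    }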
diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 0f49d93277..5897fae45f 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -419,11 +419,11 @@ uint32_t ThriftHiveMetastore_setMetaConf_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_create_database_args::~ThriftHiveMetastore_create_database_args() throw() { +ThriftHiveMetastore_create_catalog_args::~ThriftHiveMetastore_create_catalog_args() throw() { } -uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -446,8 +446,8 @@ uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protoc { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->database.read(iprot); - this->__isset.database = true; + xfer += this->catalog.read(iprot); + this->__isset.catalog = true; } else { xfer += iprot->skip(ftype); } @@ -464,13 +464,13 @@ uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_args"); - xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->database.write(oprot); + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catalog.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -479,17 +479,17 @@ uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::proto } -ThriftHiveMetastore_create_database_pargs::~ThriftHiveMetastore_create_database_pargs() throw() { +ThriftHiveMetastore_create_catalog_pargs::~ThriftHiveMetastore_create_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_pargs"); - xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->database)).write(oprot); + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catalog)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -498,11 +498,11 @@ uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::prot } -ThriftHiveMetastore_create_database_result::~ThriftHiveMetastore_create_database_result() throw() { +ThriftHiveMetastore_create_catalog_result::~ThriftHiveMetastore_create_catalog_result() 
throw() { } -uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -559,11 +559,11 @@ uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -584,11 +584,11 @@ uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::pro } -ThriftHiveMetastore_create_database_presult::~ThriftHiveMetastore_create_database_presult() throw() { +ThriftHiveMetastore_create_catalog_presult::~ThriftHiveMetastore_create_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -646,11 +646,11 @@ uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::pro } -ThriftHiveMetastore_get_database_args::~ThriftHiveMetastore_get_database_args() throw() { +ThriftHiveMetastore_get_catalog_args::~ThriftHiveMetastore_get_catalog_args() throw() { } -uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -672,9 +672,9 @@ uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->catName.read(iprot); + this->__isset.catName = true; } else { xfer += iprot->skip(ftype); } @@ -691,13 +691,13 @@ uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_args"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catName.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -706,17 +706,17 @@ uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol } 
-ThriftHiveMetastore_get_database_pargs::~ThriftHiveMetastore_get_database_pargs() throw() { +ThriftHiveMetastore_get_catalog_pargs::~ThriftHiveMetastore_get_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_pargs"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catName)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -725,11 +725,11 @@ uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_database_result::~ThriftHiveMetastore_get_database_result() throw() { +ThriftHiveMetastore_get_catalog_result::~ThriftHiveMetastore_get_catalog_result() throw() { } -uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -786,11 +786,11 @@ uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -811,11 +811,11 @@ uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_database_presult::~ThriftHiveMetastore_get_database_presult() throw() { +ThriftHiveMetastore_get_catalog_presult::~ThriftHiveMetastore_get_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -873,11 +873,11 @@ uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_drop_database_args::~ThriftHiveMetastore_drop_database_args() throw() { +ThriftHiveMetastore_get_catalogs_args::~ThriftHiveMetastore_get_catalogs_args() throw() { } -uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalogs_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -896,36 +896,7 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if 
(ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->deleteData); - this->__isset.deleteData = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->cascade); - this->__isset.cascade = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -934,22 +905,10 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalogs_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); - xfer += oprot->writeBool(this->deleteData); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); - xfer += oprot->writeBool(this->cascade); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -957,26 +916,14 @@ uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protoco } -ThriftHiveMetastore_drop_database_pargs::~ThriftHiveMetastore_drop_database_pargs() throw() { +ThriftHiveMetastore_get_catalogs_pargs::~ThriftHiveMetastore_get_catalogs_pargs() throw() { } -uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalogs_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); - xfer += oprot->writeBool((*(this->deleteData))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); - xfer += oprot->writeBool((*(this->cascade))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -984,11 +931,11 @@ uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protoc } -ThriftHiveMetastore_drop_database_result::~ThriftHiveMetastore_drop_database_result() throw() { +ThriftHiveMetastore_get_catalogs_result::~ThriftHiveMetastore_get_catalogs_result() throw() { } -uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { 
+uint32_t ThriftHiveMetastore_get_catalogs_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1009,92 +956,14 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protoc } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: + case 0: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; + xfer += this->success.read(iprot); + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result"); - - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_drop_database_presult::~ThriftHiveMetastore_drop_database_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -1103,22 +972,6 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::proto xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -1131,91 +984,32 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::proto return xfer; } +uint32_t ThriftHiveMetastore_get_catalogs_result::write(::apache::thrift::protocol::TProtocol* oprot) const { -ThriftHiveMetastore_get_databases_args::~ThriftHiveMetastore_get_databases_args() throw() { -} - - -uint32_t 
ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - xfer += iprot->readStructBegin(fname); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_result"); - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->pattern); - this->__isset.pattern = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args"); - - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->pattern); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_databases_pargs::~ThriftHiveMetastore_get_databases_pargs() throw() { -} - - -uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs"); - - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->pattern))); - xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -ThriftHiveMetastore_get_databases_result::~ThriftHiveMetastore_get_databases_result() throw() { +ThriftHiveMetastore_get_catalogs_presult::~ThriftHiveMetastore_get_catalogs_presult() throw() { } -uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalogs_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1237,20 +1031,8 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1157; - ::apache::thrift::protocol::TType _etype1160; - xfer += iprot->readListBegin(_etype1160, _size1157); - this->success.resize(_size1157); - uint32_t _i1161; - for (_i1161 = 0; _i1161 < _size1157; ++_i1161) - { - xfer += iprot->readString(this->success[_i1161]); - } - xfer += iprot->readListEnd(); - } + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -1276,40 +1058,12 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1162; - for (_iter1162 = this->success.begin(); _iter1162 != this->success.end(); ++_iter1162) - { - xfer += oprot->writeString((*_iter1162)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_databases_presult::~ThriftHiveMetastore_get_databases_presult() throw() { +ThriftHiveMetastore_drop_catalog_args::~ThriftHiveMetastore_drop_catalog_args() throw() { } -uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1330,30 +1084,10 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1163; - ::apache::thrift::protocol::TType _etype1166; - xfer += iprot->readListBegin(_etype1166, _size1163); - (*(this->success)).resize(_size1163); - uint32_t _i1167; - for (_i1167 = 0; _i1167 < _size1163; ++_i1167) - { - xfer += iprot->readString((*(this->success))[_i1167]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->catName.read(iprot); + this->__isset.catName = true; } else { xfer += iprot->skip(ftype); } @@ -1370,43 +1104,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto return xfer; } - -ThriftHiveMetastore_get_all_databases_args::~ThriftHiveMetastore_get_all_databases_args() throw() { -} - - -uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - xfer += iprot->skip(ftype); - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t 
ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_args"); + + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catName.write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1414,14 +1119,18 @@ uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::pro } -ThriftHiveMetastore_get_all_databases_pargs::~ThriftHiveMetastore_get_all_databases_pargs() throw() { +ThriftHiveMetastore_drop_catalog_pargs::~ThriftHiveMetastore_drop_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_pargs"); + + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catName)).write(oprot); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1429,11 +1138,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_get_all_databases_result::~ThriftHiveMetastore_get_all_databases_result() throw() { +ThriftHiveMetastore_drop_catalog_result::~ThriftHiveMetastore_drop_catalog_result() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1454,30 +1163,26 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1168; - ::apache::thrift::protocol::TType _etype1171; - xfer += iprot->readListBegin(_etype1171, _size1168); - this->success.resize(_size1168); - uint32_t _i1172; - for (_i1172 = 0; _i1172 < _size1168; ++_i1172) - { - xfer += iprot->readString(this->success[_i1172]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } break; - case 1: + case 2: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; } else { xfer += iprot->skip(ftype); } @@ -1494,28 +1199,24 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr return 
xfer; } -uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1173; - for (_iter1173 = this->success.begin(); _iter1173 != this->success.end(); ++_iter1173) - { - xfer += oprot->writeString((*_iter1173)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1523,11 +1224,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p } -ThriftHiveMetastore_get_all_databases_presult::~ThriftHiveMetastore_get_all_databases_presult() throw() { +ThriftHiveMetastore_drop_catalog_presult::~ThriftHiveMetastore_drop_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1548,30 +1249,26 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1174; - ::apache::thrift::protocol::TType _etype1177; - xfer += iprot->readListBegin(_etype1177, _size1174); - (*(this->success)).resize(_size1174); - uint32_t _i1178; - for (_i1178 = 0; _i1178 < _size1174; ++_i1178) - { - xfer += iprot->readString((*(this->success))[_i1178]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } break; - case 1: + case 2: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; } else { xfer += iprot->skip(ftype); } @@ -1589,11 +1286,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p } -ThriftHiveMetastore_alter_database_args::~ThriftHiveMetastore_alter_database_args() throw() { 
+ThriftHiveMetastore_create_database_args::~ThriftHiveMetastore_create_database_args() throw() { } -uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1615,17 +1312,9 @@ uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protoco switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->dbname); - this->__isset.dbname = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->db.read(iprot); - this->__isset.db = true; + xfer += this->database.read(iprot); + this->__isset.database = true; } else { xfer += iprot->skip(ftype); } @@ -1642,17 +1331,13 @@ uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args"); - - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->dbname); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args"); - xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->db.write(oprot); + xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->database.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1661,21 +1346,17 @@ uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protoc } -ThriftHiveMetastore_alter_database_pargs::~ThriftHiveMetastore_alter_database_pargs() throw() { +ThriftHiveMetastore_create_database_pargs::~ThriftHiveMetastore_create_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs"); - - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->dbname))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs"); - xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += (*(this->db)).write(oprot); + xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->database)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1684,11 +1365,11 @@ uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::proto } -ThriftHiveMetastore_alter_database_result::~ThriftHiveMetastore_alter_database_result() throw() { +ThriftHiveMetastore_create_database_result::~ThriftHiveMetastore_create_database_result() 
throw() { } -uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1725,6 +1406,14 @@ uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::proto xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1737,11 +1426,11 @@ uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::proto return xfer; } -uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -1751,6 +1440,10 @@ uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1758,11 +1451,11 @@ uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::prot } -ThriftHiveMetastore_alter_database_presult::~ThriftHiveMetastore_alter_database_presult() throw() { +ThriftHiveMetastore_create_database_presult::~ThriftHiveMetastore_create_database_presult() throw() { } -uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1799,6 +1492,14 @@ uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::prot xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1812,11 +1513,11 @@ uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::prot } -ThriftHiveMetastore_get_type_args::~ThriftHiveMetastore_get_type_args() throw() { +ThriftHiveMetastore_get_database_args::~ThriftHiveMetastore_get_database_args() throw() { } -uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1857,10 +1558,10 @@ uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* 
oprot) const { +uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args"); xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->name); @@ -1872,14 +1573,14 @@ uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_get_type_pargs::~ThriftHiveMetastore_get_type_pargs() throw() { +ThriftHiveMetastore_get_database_pargs::~ThriftHiveMetastore_get_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs"); xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->name))); @@ -1891,11 +1592,11 @@ uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_get_type_result::~ThriftHiveMetastore_get_type_result() throw() { +ThriftHiveMetastore_get_database_result::~ThriftHiveMetastore_get_database_result() throw() { } -uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1952,11 +1653,11 @@ uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -1977,11 +1678,11 @@ uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_type_presult::~ThriftHiveMetastore_get_type_presult() throw() { +ThriftHiveMetastore_get_database_presult::~ThriftHiveMetastore_get_database_presult() throw() { } -uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2039,11 +1740,11 @@ uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol:: } -ThriftHiveMetastore_create_type_args::~ThriftHiveMetastore_create_type_args() throw() { +ThriftHiveMetastore_drop_database_args::~ThriftHiveMetastore_drop_database_args() throw() { } -uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2065,9 +1766,25 @@ uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol:: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->type.read(iprot); - this->__isset.type = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->deleteData); + this->__isset.deleteData = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->cascade); + this->__isset.cascade = true; } else { xfer += iprot->skip(ftype); } @@ -2084,13 +1801,21 @@ uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->type.write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->deleteData); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->cascade); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2099,17 +1824,25 @@ uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol: } -ThriftHiveMetastore_create_type_pargs::~ThriftHiveMetastore_create_type_pargs() throw() { +ThriftHiveMetastore_drop_database_pargs::~ThriftHiveMetastore_drop_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->type)).write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool((*(this->deleteData))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool((*(this->cascade))); xfer += 
oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2118,11 +1851,11 @@ uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol } -ThriftHiveMetastore_create_type_result::~ThriftHiveMetastore_create_type_result() throw() { +ThriftHiveMetastore_drop_database_result::~ThriftHiveMetastore_drop_database_result() throw() { } -uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2143,14 +1876,6 @@ uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -2187,17 +1912,13 @@ uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2216,11 +1937,11 @@ uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protoco } -ThriftHiveMetastore_create_type_presult::~ThriftHiveMetastore_create_type_presult() throw() { +ThriftHiveMetastore_drop_database_presult::~ThriftHiveMetastore_drop_database_presult() throw() { } -uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2241,14 +1962,6 @@ uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protoco } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -2286,11 +1999,11 @@ uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_drop_type_args::~ThriftHiveMetastore_drop_type_args() throw() { +ThriftHiveMetastore_get_databases_args::~ThriftHiveMetastore_get_databases_args() throw() { } -uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2313,8 
+2026,8 @@ uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->type); - this->__isset.type = true; + xfer += iprot->readString(this->pattern); + this->__isset.pattern = true; } else { xfer += iprot->skip(ftype); } @@ -2331,13 +2044,13 @@ uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->type); + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->pattern); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2346,17 +2059,17 @@ uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_drop_type_pargs::~ThriftHiveMetastore_drop_type_pargs() throw() { +ThriftHiveMetastore_get_databases_pargs::~ThriftHiveMetastore_get_databases_pargs() throw() { } -uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->type))); + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->pattern))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2365,11 +2078,11 @@ uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_drop_type_result::~ThriftHiveMetastore_drop_type_result() throw() { +ThriftHiveMetastore_get_databases_result::~ThriftHiveMetastore_get_databases_result() throw() { } -uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2391,8 +2104,20 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1175; + ::apache::thrift::protocol::TType _etype1178; + xfer += iprot->readListBegin(_etype1178, _size1175); + this->success.resize(_size1175); + uint32_t _i1179; + for (_i1179 = 0; _i1179 < _size1175; ++_i1179) + { + xfer += iprot->readString(this->success[_i1179]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; 
} else { xfer += iprot->skip(ftype); @@ -2406,14 +2131,6 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -2426,24 +2143,28 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter1180; + for (_iter1180 = this->success.begin(); _iter1180 != this->success.end(); ++_iter1180) + { + xfer += oprot->writeString((*_iter1180)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2451,11 +2172,11 @@ uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_drop_type_presult::~ThriftHiveMetastore_drop_type_presult() throw() { +ThriftHiveMetastore_get_databases_presult::~ThriftHiveMetastore_get_databases_presult() throw() { } -uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2477,8 +2198,20 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1181; + ::apache::thrift::protocol::TType _etype1184; + xfer += iprot->readListBegin(_etype1184, _size1181); + (*(this->success)).resize(_size1181); + uint32_t _i1185; + for (_i1185 = 0; _i1185 < _size1181; ++_i1185) + { + xfer += iprot->readString((*(this->success))[_i1185]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2492,14 +2225,6 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - }
- break; default: xfer += iprot->skip(ftype); break; @@ -2513,11 +2238,11 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_get_type_all_args::~ThriftHiveMetastore_get_type_all_args() throw() { +ThriftHiveMetastore_get_all_databases_args::~ThriftHiveMetastore_get_all_databases_args() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2536,20 +2261,7 @@ uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -2558,14 +2270,10 @@ uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_args"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2573,18 +2281,14 @@ uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_type_all_pargs::~ThriftHiveMetastore_get_type_all_pargs() throw() { +ThriftHiveMetastore_get_all_databases_pargs::~ThriftHiveMetastore_get_all_databases_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_pargs"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2592,11 +2296,11 @@ uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_type_all_result::~ThriftHiveMetastore_get_type_all_result() throw() { +ThriftHiveMetastore_get_all_databases_result::~ThriftHiveMetastore_get_all_databases_result() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t 
xfer = 0; @@ -2618,22 +2322,19 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_MAP) { + if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1179; - ::apache::thrift::protocol::TType _ktype1180; - ::apache::thrift::protocol::TType _vtype1181; - xfer += iprot->readMapBegin(_ktype1180, _vtype1181, _size1179); - uint32_t _i1183; - for (_i1183 = 0; _i1183 < _size1179; ++_i1183) + uint32_t _size1186; + ::apache::thrift::protocol::TType _etype1189; + xfer += iprot->readListBegin(_etype1189, _size1186); + this->success.resize(_size1186); + uint32_t _i1190; + for (_i1190 = 0; _i1190 < _size1186; ++_i1190) { - std::string _key1184; - xfer += iprot->readString(_key1184); - Type& _val1185 = this->success[_key1184]; - xfer += _val1185.read(iprot); + xfer += iprot->readString(this->success[_i1190]); } - xfer += iprot->readMapEnd(); + xfer += iprot->readListEnd(); } this->__isset.success = true; } else { @@ -2642,8 +2343,8 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } @@ -2660,28 +2361,27 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::map<std::string, Type> ::const_iterator _iter1186; - for (_iter1186 = this->success.begin(); _iter1186 != this->success.end(); ++_iter1186) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter1191; + for (_iter1191 = this->success.begin(); _iter1191 != this->success.end(); ++_iter1191) { - xfer += oprot->writeString(_iter1186->first); - xfer += _iter1186->second.write(oprot); + xfer += oprot->writeString((*_iter1191)); } - xfer += oprot->writeMapEnd(); + xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o2.write(oprot); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); @@ -2690,11 +2390,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_type_all_presult::~ThriftHiveMetastore_get_type_all_presult() throw() { +ThriftHiveMetastore_get_all_databases_presult::~ThriftHiveMetastore_get_all_databases_presult() throw() { } -uint32_t
ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2716,22 +2416,19 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_MAP) { + if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1187; - ::apache::thrift::protocol::TType _ktype1188; - ::apache::thrift::protocol::TType _vtype1189; - xfer += iprot->readMapBegin(_ktype1188, _vtype1189, _size1187); - uint32_t _i1191; - for (_i1191 = 0; _i1191 < _size1187; ++_i1191) + uint32_t _size1192; + ::apache::thrift::protocol::TType _etype1195; + xfer += iprot->readListBegin(_etype1195, _size1192); + (*(this->success)).resize(_size1192); + uint32_t _i1196; + for (_i1196 = 0; _i1196 < _size1192; ++_i1196) { - std::string _key1192; - xfer += iprot->readString(_key1192); - Type& _val1193 = (*(this->success))[_key1192]; - xfer += _val1193.read(iprot); + xfer += iprot->readString((*(this->success))[_i1196]); } - xfer += iprot->readMapEnd(); + xfer += iprot->readListEnd(); } this->__isset.success = true; } else { @@ -2740,8 +2437,8 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } @@ -2759,11 +2456,11 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_get_fields_args::~ThriftHiveMetastore_get_fields_args() throw() { +ThriftHiveMetastore_alter_database_args::~ThriftHiveMetastore_alter_database_args() throw() { } -uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2786,16 +2483,16 @@ uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::T { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; + xfer += iprot->readString(this->dbname); + this->__isset.dbname = true; } else { xfer += iprot->skip(ftype); } break; case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->db.read(iprot); + this->__isset.db = true; } else { xfer += iprot->skip(ftype); } @@ -2812,17 +2509,17 @@ uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args"); - xfer += oprot->writeFieldBegin("db_name", 
::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->db.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2831,21 +2528,21 @@ uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_fields_pargs::~ThriftHiveMetastore_get_fields_pargs() throw() { +ThriftHiveMetastore_alter_database_pargs::~ThriftHiveMetastore_alter_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += (*(this->db)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2854,11 +2551,11 @@ uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_get_fields_result::~ThriftHiveMetastore_get_fields_result() throw() { +ThriftHiveMetastore_alter_database_result::~ThriftHiveMetastore_alter_database_result() throw() { } -uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2879,26 +2576,6 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1194; - ::apache::thrift::protocol::TType _etype1197; - xfer += iprot->readListBegin(_etype1197, _size1194); - this->success.resize(_size1194); - uint32_t _i1198; - for (_i1198 = 0; _i1198 < _size1194; ++_i1198) - { - xfer += this->success[_i1198].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -2915,14 +2592,6 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - 
} - break; default: xfer += iprot->skip(ftype); break; @@ -2935,25 +2604,13 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1199; - for (_iter1199 = this->success.begin(); _iter1199 != this->success.end(); ++_iter1199) - { - xfer += (*_iter1199).write(oprot); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2961,10 +2618,6 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2972,11 +2625,11 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_fields_presult::~ThriftHiveMetastore_get_fields_presult() throw() { +ThriftHiveMetastore_alter_database_presult::~ThriftHiveMetastore_alter_database_presult() throw() { } -uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2997,26 +2650,6 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1200; - ::apache::thrift::protocol::TType _etype1203; - xfer += iprot->readListBegin(_etype1203, _size1200); - (*(this->success)).resize(_size1200); - uint32_t _i1204; - for (_i1204 = 0; _i1204 < _size1200; ++_i1204) - { - xfer += (*(this->success))[_i1204].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -3033,14 +2666,6 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3054,11 +2679,11 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol }
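Throughout these hunks the same four companion structs recur for every service method: _args (server side, holding values), _pargs (client side, holding pointers so nothing is copied before transport), _result, and _presult (the client-side pointer mirror of _result). Declared exceptions surface in the result as numbered struct fields o1, o2, o3, and the writer serializes at most one set branch, which is why adding NoSuchObjectException to create_database shows up above as a new case 3 / o3 field. A minimal sketch of that result-writing convention, not part of this commit; example_result and MyError are invented stand-ins for generated types:

#include <cstdint>
#include <thrift/protocol/TProtocol.h>

using ::apache::thrift::protocol::TProtocol;
using ::apache::thrift::protocol::T_STRUCT;

// Stand-in for a Thrift exception struct; real ones are generated from the IDL.
struct MyError {
  uint32_t write(TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("MyError");
    xfer += oprot->writeFieldStop();
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

// Hypothetical mirror of the *_result convention above: each declared
// exception is a numbered struct field (o1, o2, ...) and at most one set
// branch is serialized; a void return has no field 0 at all.
struct example_result {
  MyError o1;
  MyError o2;
  struct { bool o1 = false; bool o2 = false; } __isset;

  uint32_t write(TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("example_result");
    if (__isset.o1) {                       // first set exception wins
      xfer += oprot->writeFieldBegin("o1", T_STRUCT, 1);
      xfer += o1.write(oprot);
      xfer += oprot->writeFieldEnd();
    } else if (__isset.o2) {
      xfer += oprot->writeFieldBegin("o2", T_STRUCT, 2);
      xfer += o2.write(oprot);
      xfer += oprot->writeFieldEnd();
    }
    xfer += oprot->writeFieldStop();        // T_STOP terminates the struct
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

The accumulated return value (xfer) is the byte count the protocol reports for each primitive write, which the generated code threads through every call exactly as above.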
-ThriftHiveMetastore_get_fields_with_environment_context_args::~ThriftHiveMetastore_get_fields_with_environment_context_args() throw() { +ThriftHiveMetastore_get_type_args::~ThriftHiveMetastore_get_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3081,24 +2706,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::ap { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->environment_context.read(iprot); - this->__isset.environment_context = true; + xfer += iprot->readString(this->name); + this->__isset.name = true; } else { xfer += iprot->skip(ftype); } @@ -3115,21 +2724,13 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::ap return xfer; } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3138,25 +2739,17 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::a } -ThriftHiveMetastore_get_fields_with_environment_context_pargs::~ThriftHiveMetastore_get_fields_with_environment_context_pargs() throw() { +ThriftHiveMetastore_get_type_pargs::~ThriftHiveMetastore_get_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += 
oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3165,11 +2758,11 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(:: } -ThriftHiveMetastore_get_fields_with_environment_context_result::~ThriftHiveMetastore_get_fields_with_environment_context_result() throw() { +ThriftHiveMetastore_get_type_result::~ThriftHiveMetastore_get_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3191,20 +2784,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1205; - ::apache::thrift::protocol::TType _etype1208; - xfer += iprot->readListBegin(_etype1208, _size1205); - this->success.resize(_size1205); - uint32_t _i1209; - for (_i1209 = 0; _i1209 < _size1205; ++_i1209) - { - xfer += this->success[_i1209].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3226,14 +2807,6 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3246,23 +2819,15 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: return xfer; } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1210; - for (_iter1210 = this->success.begin(); _iter1210 != this->success.end(); ++_iter1210) - { - xfer += (*_iter1210).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -3272,10 
+2837,6 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -3283,11 +2844,11 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: } -ThriftHiveMetastore_get_fields_with_environment_context_presult::~ThriftHiveMetastore_get_fields_with_environment_context_presult() throw() { +ThriftHiveMetastore_get_type_presult::~ThriftHiveMetastore_get_type_presult() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3309,20 +2870,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1211; - ::apache::thrift::protocol::TType _etype1214; - xfer += iprot->readListBegin(_etype1214, _size1211); - (*(this->success)).resize(_size1211); - uint32_t _i1215; - for (_i1215 = 0; _i1215 < _size1211; ++_i1215) - { - xfer += (*(this->success))[_i1215].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3344,14 +2893,6 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3365,11 +2906,11 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: } -ThriftHiveMetastore_get_schema_args::~ThriftHiveMetastore_get_schema_args() throw() { +ThriftHiveMetastore_create_type_args::~ThriftHiveMetastore_create_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3391,17 +2932,9 @@ uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::T switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->type.read(iprot); + this->__isset.type = true; } else { xfer += iprot->skip(ftype); } @@ -3418,17 +2951,13 @@ uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t 
ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args"); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->type.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3437,21 +2966,17 @@ uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_schema_pargs::~ThriftHiveMetastore_get_schema_pargs() throw() { +ThriftHiveMetastore_create_type_pargs::~ThriftHiveMetastore_create_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_pargs"); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->type)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3460,11 +2985,11 @@ uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_get_schema_result::~ThriftHiveMetastore_get_schema_result() throw() { +ThriftHiveMetastore_create_type_result::~ThriftHiveMetastore_create_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3486,20 +3011,8 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1216; - ::apache::thrift::protocol::TType _etype1219; - xfer += iprot->readListBegin(_etype1219, _size1216); - this->success.resize(_size1216); - uint32_t _i1220; - for (_i1220 = 0; _i1220 < _size1216; ++_i1220) - { - xfer += this->success[_i1220].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3541,23 +3054,15 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: return 
xfer; } -uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1221; - for (_iter1221 = this->success.begin(); _iter1221 != this->success.end(); ++_iter1221) - { - xfer += (*_iter1221).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -3578,11 +3083,11 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_schema_presult::~ThriftHiveMetastore_get_schema_presult() throw() { +ThriftHiveMetastore_create_type_presult::~ThriftHiveMetastore_create_type_presult() throw() { } -uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3604,20 +3109,8 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1222; - ::apache::thrift::protocol::TType _etype1225; - xfer += iprot->readListBegin(_etype1225, _size1222); - (*(this->success)).resize(_size1222); - uint32_t _i1226; - for (_i1226 = 0; _i1226 < _size1222; ++_i1226) - { - xfer += (*(this->success))[_i1226].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3660,11 +3153,11 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_get_schema_with_environment_context_args::~ThriftHiveMetastore_get_schema_with_environment_context_args() throw() { +ThriftHiveMetastore_drop_type_args::~ThriftHiveMetastore_drop_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3687,24 +3180,8 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::ap { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if
(ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->environment_context.read(iprot); - this->__isset.environment_context = true; + xfer += iprot->readString(this->type); + this->__isset.type = true; } else { xfer += iprot->skip(ftype); } @@ -3721,21 +3198,13 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::ap return xfer; } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_args"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->type); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3744,25 +3213,17 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::a } -ThriftHiveMetastore_get_schema_with_environment_context_pargs::~ThriftHiveMetastore_get_schema_with_environment_context_pargs() throw() { +ThriftHiveMetastore_drop_type_pargs::~ThriftHiveMetastore_drop_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_pargs"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->type))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3771,11 +3232,11 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(:: } -ThriftHiveMetastore_get_schema_with_environment_context_result::~ThriftHiveMetastore_get_schema_with_environment_context_result() throw() { +ThriftHiveMetastore_drop_type_result::~ThriftHiveMetastore_drop_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t 
ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3797,20 +3258,8 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1227; - ::apache::thrift::protocol::TType _etype1230; - xfer += iprot->readListBegin(_etype1230, _size1227); - this->success.resize(_size1227); - uint32_t _i1231; - for (_i1231 = 0; _i1231 < _size1227; ++_i1231) - { - xfer += this->success[_i1231].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3832,14 +3281,6 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3852,23 +3293,15 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: return xfer; } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1232; - for (_iter1232 = this->success.begin(); _iter1232 != this->success.end(); ++_iter1232) - { - xfer += (*_iter1232).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -3878,10 +3311,6 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -3889,11 +3318,11 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: } -ThriftHiveMetastore_get_schema_with_environment_context_presult::~ThriftHiveMetastore_get_schema_with_environment_context_presult() throw() { +ThriftHiveMetastore_drop_type_presult::~ThriftHiveMetastore_drop_type_presult() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol::TProtocol* iprot)
{ apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3915,20 +3344,8 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1233; - ::apache::thrift::protocol::TType _etype1236; - xfer += iprot->readListBegin(_etype1236, _size1233); - (*(this->success)).resize(_size1233); - uint32_t _i1237; - for (_i1237 = 0; _i1237 < _size1233; ++_i1237) - { - xfer += (*(this->success))[_i1237].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3950,14 +3367,6 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3971,11 +3380,11 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: } -ThriftHiveMetastore_create_table_args::~ThriftHiveMetastore_create_table_args() throw() { +ThriftHiveMetastore_get_type_all_args::~ThriftHiveMetastore_get_type_all_args() throw() { } -uint32_t ThriftHiveMetastore_create_table_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3997,9 +3406,9 @@ uint32_t ThriftHiveMetastore_create_table_args::read(::apache::thrift::protocol: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->tbl.read(iprot); - this->__isset.tbl = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; } else { xfer += iprot->skip(ftype); } @@ -4016,13 +3425,13 @@ uint32_t ThriftHiveMetastore_create_table_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_create_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_args"); - xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->tbl.write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -4031,17 +3440,17 @@ uint32_t ThriftHiveMetastore_create_table_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_create_table_pargs::~ThriftHiveMetastore_create_table_pargs() throw() { +ThriftHiveMetastore_get_type_all_pargs::~ThriftHiveMetastore_get_type_all_pargs() throw() { } -uint32_t ThriftHiveMetastore_create_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer 
= 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_pargs"); - xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->tbl)).write(oprot); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -4050,11 +3459,11 @@ uint32_t ThriftHiveMetastore_create_table_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_create_table_result::~ThriftHiveMetastore_create_table_result() throw() { +ThriftHiveMetastore_get_type_all_result::~ThriftHiveMetastore_get_type_all_result() throw() { } -uint32_t ThriftHiveMetastore_create_table_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -4075,15 +3484,30 @@ uint32_t ThriftHiveMetastore_create_table_result::read(::apache::thrift::protoco } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + case 0: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->success.clear(); + uint32_t _size1197; + ::apache::thrift::protocol::TType _ktype1198; + ::apache::thrift::protocol::TType _vtype1199; + xfer += iprot->readMapBegin(_ktype1198, _vtype1199, _size1197); + uint32_t _i1201; + for (_i1201 = 0; _i1201 < _size1197; ++_i1201) + { + std::string _key1202; + xfer += iprot->readString(_key1202); + Type& _val1203 = this->success[_key1202]; + xfer += _val1203.read(iprot); + } + xfer += iprot->readMapEnd(); + } + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; @@ -4091,22 +3515,6 @@ uint32_t ThriftHiveMetastore_create_table_result::read(::apache::thrift::protoco xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -4119,28 +3527,29 @@ uint32_t ThriftHiveMetastore_create_table_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_create_table_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_result"); - if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, 
static_cast(this->success.size())); + std::map ::const_iterator _iter1204; + for (_iter1204 = this->success.begin(); _iter1204 != this->success.end(); ++_iter1204) + { + xfer += oprot->writeString(_iter1204->first); + xfer += _iter1204->second.write(oprot); + } + xfer += oprot->writeMapEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o4) { - xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->o4.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -4148,11 +3557,11 @@ uint32_t ThriftHiveMetastore_create_table_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_create_table_presult::~ThriftHiveMetastore_create_table_presult() throw() { +ThriftHiveMetastore_get_type_all_presult::~ThriftHiveMetastore_get_type_all_presult() throw() { } -uint32_t ThriftHiveMetastore_create_table_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -4173,15 +3582,30 @@ uint32_t ThriftHiveMetastore_create_table_presult::read(::apache::thrift::protoc } switch (fid) { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + case 0: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + (*(this->success)).clear(); + uint32_t _size1205; + ::apache::thrift::protocol::TType _ktype1206; + ::apache::thrift::protocol::TType _vtype1207; + xfer += iprot->readMapBegin(_ktype1206, _vtype1207, _size1205); + uint32_t _i1209; + for (_i1209 = 0; _i1209 < _size1205; ++_i1209) + { + std::string _key1210; + xfer += iprot->readString(_key1210); + Type& _val1211 = (*(this->success))[_key1210]; + xfer += _val1211.read(iprot); + } + xfer += iprot->readMapEnd(); + } + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; @@ -4189,22 +3613,6 @@ uint32_t ThriftHiveMetastore_create_table_presult::read(::apache::thrift::protoc xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -4218,11 +3626,1470 @@ uint32_t ThriftHiveMetastore_create_table_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_create_table_with_environment_context_args::~ThriftHiveMetastore_create_table_with_environment_context_args() throw() { +ThriftHiveMetastore_get_fields_args::~ThriftHiveMetastore_get_fields_args() throw() { } -uint32_t 
ThriftHiveMetastore_create_table_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_pargs::~ThriftHiveMetastore_get_fields_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_result::~ThriftHiveMetastore_get_fields_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1212; + ::apache::thrift::protocol::TType _etype1215; + xfer += iprot->readListBegin(_etype1215, 
_size1212); + this->success.resize(_size1212); + uint32_t _i1216; + for (_i1216 = 0; _i1216 < _size1212; ++_i1216) + { + xfer += this->success[_i1216].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<FieldSchema> ::const_iterator _iter1217; + for (_iter1217 = this->success.begin(); _iter1217 != this->success.end(); ++_iter1217) + { + xfer += (*_iter1217).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_presult::~ThriftHiveMetastore_get_fields_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1218; + ::apache::thrift::protocol::TType _etype1221; + xfer += iprot->readListBegin(_etype1221, _size1218); + (*(this->success)).resize(_size1218); + uint32_t _i1222; + for (_i1222 = 0; _i1222 < _size1218; ++_i1222) + { + xfer += (*(this->success))[_i1222].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); +
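All of the generated read() methods above share one protocol loop: readStructBegin, then readFieldBegin until T_STOP, a switch on the field id guarded by a type check, and skip() for anything unrecognized. That skip() default is what lets an old reader tolerate fields it does not know about. Below is a minimal hand-rolled sketch of the same loop; it assumes a recent Apache Thrift C++ library (std::shared_ptr based; releases before 0.11 used boost::shared_ptr), and the struct and field names are invented for illustration.

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>

using namespace apache::thrift::protocol;
using apache::thrift::transport::TMemoryBuffer;

int main() {
  std::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol proto(buf);

  // Writer: field 0 (success) plus a field 9 the reader has never heard of.
  proto.writeStructBegin("demo_result");
  proto.writeFieldBegin("success", T_BOOL, 0);
  proto.writeBool(true);
  proto.writeFieldEnd();
  proto.writeFieldBegin("extra", T_STRING, 9);
  proto.writeString(std::string("ignored by old readers"));
  proto.writeFieldEnd();
  proto.writeFieldStop();
  proto.writeStructEnd();

  // Reader: the same loop shape as the generated read() methods.
  bool success = false;
  bool isset_success = false;
  std::string fname;
  TType ftype;
  int16_t fid;
  proto.readStructBegin(fname);
  while (true) {
    proto.readFieldBegin(fname, ftype, fid);
    if (ftype == T_STOP) break;
    switch (fid) {
      case 0:
        if (ftype == T_BOOL) {
          proto.readBool(success);
          isset_success = true;
        } else {
          proto.skip(ftype);   // right id, wrong type: ignore it
        }
        break;
      default:
        proto.skip(ftype);     // unknown field id: ignore it
        break;
    }
    proto.readFieldEnd();
  }
  proto.readStructEnd();
  assert(isset_success && success);
  return 0;
}

The generated presult variants run the same loop but fill the caller's output arguments through pointers instead of owned members.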
this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_fields_with_environment_context_args::~ThriftHiveMetastore_get_fields_with_environment_context_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environment_context.read(iprot); + this->__isset.environment_context = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_with_environment_context_pargs::~ThriftHiveMetastore_get_fields_with_environment_context_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += 
oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_with_environment_context_result::~ThriftHiveMetastore_get_fields_with_environment_context_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1223; + ::apache::thrift::protocol::TType _etype1226; + xfer += iprot->readListBegin(_etype1226, _size1223); + this->success.resize(_size1223); + uint32_t _i1227; + for (_i1227 = 0; _i1227 < _size1223; ++_i1227) + { + xfer += this->success[_i1227].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1228; + for (_iter1228 = this->success.begin(); _iter1228 != this->success.end(); ++_iter1228) + { + xfer += (*_iter1228).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer 
+= oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_fields_with_environment_context_presult::~ThriftHiveMetastore_get_fields_with_environment_context_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1229; + ::apache::thrift::protocol::TType _etype1232; + xfer += iprot->readListBegin(_etype1232, _size1229); + (*(this->success)).resize(_size1229); + uint32_t _i1233; + for (_i1233 = 0; _i1233 < _size1229; ++_i1233) + { + xfer += (*(this->success))[_i1233].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_schema_args::~ThriftHiveMetastore_get_schema_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_get_schema_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_pargs::~ThriftHiveMetastore_get_schema_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_result::~ThriftHiveMetastore_get_schema_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1234; + ::apache::thrift::protocol::TType _etype1237; + xfer += iprot->readListBegin(_etype1237, _size1234); + this->success.resize(_size1234); + uint32_t _i1238; + for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + { + xfer += this->success[_i1238].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast<uint32_t>(this->success.size())); + std::vector<FieldSchema> ::const_iterator _iter1239; + for (_iter1239 = this->success.begin(); _iter1239 != this->success.end(); ++_iter1239) + { + xfer += (*_iter1239).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_presult::~ThriftHiveMetastore_get_schema_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1240; + ::apache::thrift::protocol::TType _etype1243; + xfer += iprot->readListBegin(_etype1243, _size1240); + (*(this->success)).resize(_size1240); + uint32_t _i1244; + for (_i1244 = 0; _i1244 < _size1240; ++_i1244) + { + xfer += (*(this->success))[_i1244].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_args::~ThriftHiveMetastore_get_schema_with_environment_context_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer +=
iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environment_context.read(iprot); + this->__isset.environment_context = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_pargs::~ThriftHiveMetastore_get_schema_with_environment_context_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_result::~ThriftHiveMetastore_get_schema_with_environment_context_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1245; + 
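The list fields in these hunks are encoded as an element type plus a count followed by the raw elements, which is why the generated readers can resize the vector once and then element-read in place, and why the writers narrow size_t to uint32_t explicitly. A round-trip sketch of that pattern with a vector of strings, under the same Thrift assumptions as the sketch above:

#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>

using namespace apache::thrift::protocol;
using apache::thrift::transport::TMemoryBuffer;

int main() {
  std::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol proto(buf);

  // Write side: element type + count, then the elements, exactly as in
  // the generated write() methods (note the explicit uint32_t narrowing).
  std::vector<std::string> cols = {"id", "name", "ts"};
  proto.writeListBegin(T_STRING, static_cast<uint32_t>(cols.size()));
  for (std::vector<std::string>::const_iterator it = cols.begin(); it != cols.end(); ++it)
    proto.writeString(*it);
  proto.writeListEnd();

  // Read side: learn the size first, resize once, then element-read
  // in place, the same shape as the generated read() loops.
  std::vector<std::string> out;
  TType etype;
  uint32_t size;
  proto.readListBegin(etype, size);
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i)
    proto.readString(out[i]);
  proto.readListEnd();

  assert(etype == T_STRING && out == cols);
  return 0;
}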
::apache::thrift::protocol::TType _etype1248; + xfer += iprot->readListBegin(_etype1248, _size1245); + this->success.resize(_size1245); + uint32_t _i1249; + for (_i1249 = 0; _i1249 < _size1245; ++_i1249) + { + xfer += this->success[_i1249].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1250; + for (_iter1250 = this->success.begin(); _iter1250 != this->success.end(); ++_iter1250) + { + xfer += (*_iter1250).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_presult::~ThriftHiveMetastore_get_schema_with_environment_context_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1251; + ::apache::thrift::protocol::TType _etype1254; + xfer += iprot->readListBegin(_etype1254, _size1251); + (*(this->success)).resize(_size1251); + uint32_t _i1255; + for (_i1255 = 0; _i1255 < _size1251; ++_i1255) + { + xfer += (*(this->success))[_i1255].read(iprot); + } + xfer += 
iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_create_table_args::~ThriftHiveMetastore_create_table_args() throw() { +} + + +uint32_t ThriftHiveMetastore_create_table_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->tbl.read(iprot); + this->__isset.tbl = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_table_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_args"); + + xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->tbl.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_table_pargs::~ThriftHiveMetastore_create_table_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_create_table_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_pargs"); + + xfer += oprot->writeFieldBegin("tbl", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->tbl)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_table_result::~ThriftHiveMetastore_create_table_result() throw() { +} + + +uint32_t ThriftHiveMetastore_create_table_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + 
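Each RPC in this file gets two argument flavors: the _args struct owns its fields (the server deserializes into it), while the _pargs struct holds const pointers and only implements write(), so the client can serialize the caller's values without copying them; that is why the pargs bodies spell the access as (*(this->tbl)).write(oprot). A stripped-down illustration of the two write paths follows; demo_args and demo_pargs are invented names, not the generated classes.

#include <cstdint>
#include <memory>
#include <string>

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>

using namespace apache::thrift::protocol;
using apache::thrift::transport::TMemoryBuffer;

// Owning flavor: what the server reads into (real generated code also
// has a matching read(); only write() is shown here).
struct demo_args {
  std::string name;
  uint32_t write(TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("demo_args");
    xfer += oprot->writeFieldBegin("name", T_STRING, 1);
    xfer += oprot->writeString(this->name);
    xfer += oprot->writeFieldEnd();
    xfer += oprot->writeFieldStop();
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

// Pointer flavor: what the client sends. It borrows the caller's value
// instead of copying it, hence the (*(this->name)) spelling.
struct demo_pargs {
  const std::string* name;
  uint32_t write(TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("demo_pargs");
    xfer += oprot->writeFieldBegin("name", T_STRING, 1);
    xfer += oprot->writeString((*(this->name)));
    xfer += oprot->writeFieldEnd();
    xfer += oprot->writeFieldStop();
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

int main() {
  std::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol proto(buf);
  std::string n = "mytype";
  demo_args a; a.name = n; a.write(&proto);    // owns a copy of n
  demo_pargs p; p.name = &n; p.write(&proto);  // borrows n, no copy
  return 0;
}

For metastore calls that ship a whole Table object or thousands of partition names, skipping that copy on the client side is the point of the pargs variant.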
if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_create_table_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_table_result"); + + if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_create_table_presult::~ThriftHiveMetastore_create_table_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_create_table_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_create_table_with_environment_context_args::~ThriftHiveMetastore_create_table_with_environment_context_args() throw() { +} + + +uint32_t 
ThriftHiveMetastore_create_table_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -4518,14 +5385,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1238; - ::apache::thrift::protocol::TType _etype1241; - xfer += iprot->readListBegin(_etype1241, _size1238); - this->primaryKeys.resize(_size1238); - uint32_t _i1242; - for (_i1242 = 0; _i1242 < _size1238; ++_i1242) + uint32_t _size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + this->primaryKeys.resize(_size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - xfer += this->primaryKeys[_i1242].read(iprot); + xfer += this->primaryKeys[_i1260].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +5405,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1243; - ::apache::thrift::protocol::TType _etype1246; - xfer += iprot->readListBegin(_etype1246, _size1243); - this->foreignKeys.resize(_size1243); - uint32_t _i1247; - for (_i1247 = 0; _i1247 < _size1243; ++_i1247) + uint32_t _size1261; + ::apache::thrift::protocol::TType _etype1264; + xfer += iprot->readListBegin(_etype1264, _size1261); + this->foreignKeys.resize(_size1261); + uint32_t _i1265; + for (_i1265 = 0; _i1265 < _size1261; ++_i1265) { - xfer += this->foreignKeys[_i1247].read(iprot); + xfer += this->foreignKeys[_i1265].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +5425,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1248; - ::apache::thrift::protocol::TType _etype1251; - xfer += iprot->readListBegin(_etype1251, _size1248); - this->uniqueConstraints.resize(_size1248); - uint32_t _i1252; - for (_i1252 = 0; _i1252 < _size1248; ++_i1252) + uint32_t _size1266; + ::apache::thrift::protocol::TType _etype1269; + xfer += iprot->readListBegin(_etype1269, _size1266); + this->uniqueConstraints.resize(_size1266); + uint32_t _i1270; + for (_i1270 = 0; _i1270 < _size1266; ++_i1270) { - xfer += this->uniqueConstraints[_i1252].read(iprot); + xfer += this->uniqueConstraints[_i1270].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +5445,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1253; - ::apache::thrift::protocol::TType _etype1256; - xfer += iprot->readListBegin(_etype1256, _size1253); - this->notNullConstraints.resize(_size1253); - uint32_t _i1257; - for (_i1257 = 0; _i1257 < _size1253; ++_i1257) + uint32_t _size1271; + ::apache::thrift::protocol::TType _etype1274; + xfer += iprot->readListBegin(_etype1274, _size1271); + this->notNullConstraints.resize(_size1271); + uint32_t _i1275; + for (_i1275 = 0; _i1275 < _size1271; ++_i1275) { - xfer += this->notNullConstraints[_i1257].read(iprot); + xfer += this->notNullConstraints[_i1275].read(iprot); } xfer += iprot->readListEnd(); } @@ -4598,14 +5465,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { 
{ this->defaultConstraints.clear(); - uint32_t _size1258; - ::apache::thrift::protocol::TType _etype1261; - xfer += iprot->readListBegin(_etype1261, _size1258); - this->defaultConstraints.resize(_size1258); - uint32_t _i1262; - for (_i1262 = 0; _i1262 < _size1258; ++_i1262) + uint32_t _size1276; + ::apache::thrift::protocol::TType _etype1279; + xfer += iprot->readListBegin(_etype1279, _size1276); + this->defaultConstraints.resize(_size1276); + uint32_t _i1280; + for (_i1280 = 0; _i1280 < _size1276; ++_i1280) { - xfer += this->defaultConstraints[_i1262].read(iprot); + xfer += this->defaultConstraints[_i1280].read(iprot); } xfer += iprot->readListEnd(); } @@ -4618,14 +5485,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraints.clear(); - uint32_t _size1263; - ::apache::thrift::protocol::TType _etype1266; - xfer += iprot->readListBegin(_etype1266, _size1263); - this->checkConstraints.resize(_size1263); - uint32_t _i1267; - for (_i1267 = 0; _i1267 < _size1263; ++_i1267) + uint32_t _size1281; + ::apache::thrift::protocol::TType _etype1284; + xfer += iprot->readListBegin(_etype1284, _size1281); + this->checkConstraints.resize(_size1281); + uint32_t _i1285; + for (_i1285 = 0; _i1285 < _size1281; ++_i1285) { - xfer += this->checkConstraints[_i1267].read(iprot); + xfer += this->checkConstraints[_i1285].read(iprot); } xfer += iprot->readListEnd(); } @@ -4658,10 +5525,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1268; - for (_iter1268 = this->primaryKeys.begin(); _iter1268 != this->primaryKeys.end(); ++_iter1268) + std::vector ::const_iterator _iter1286; + for (_iter1286 = this->primaryKeys.begin(); _iter1286 != this->primaryKeys.end(); ++_iter1286) { - xfer += (*_iter1268).write(oprot); + xfer += (*_iter1286).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4670,10 +5537,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1269; - for (_iter1269 = this->foreignKeys.begin(); _iter1269 != this->foreignKeys.end(); ++_iter1269) + std::vector ::const_iterator _iter1287; + for (_iter1287 = this->foreignKeys.begin(); _iter1287 != this->foreignKeys.end(); ++_iter1287) { - xfer += (*_iter1269).write(oprot); + xfer += (*_iter1287).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4682,10 +5549,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1270; - for (_iter1270 = this->uniqueConstraints.begin(); _iter1270 != this->uniqueConstraints.end(); ++_iter1270) + std::vector ::const_iterator _iter1288; + for (_iter1288 = this->uniqueConstraints.begin(); _iter1288 != this->uniqueConstraints.end(); ++_iter1288) { - xfer += (*_iter1270).write(oprot); + xfer += (*_iter1288).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -4694,10 +5561,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1271; - for (_iter1271 = this->notNullConstraints.begin(); _iter1271 != this->notNullConstraints.end(); ++_iter1271) + std::vector ::const_iterator _iter1289; + for (_iter1289 = this->notNullConstraints.begin(); _iter1289 != this->notNullConstraints.end(); ++_iter1289) { - xfer += (*_iter1271).write(oprot); + xfer += (*_iter1289).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4706,10 +5573,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter1272; - for (_iter1272 = this->defaultConstraints.begin(); _iter1272 != this->defaultConstraints.end(); ++_iter1272) + std::vector ::const_iterator _iter1290; + for (_iter1290 = this->defaultConstraints.begin(); _iter1290 != this->defaultConstraints.end(); ++_iter1290) { - xfer += (*_iter1272).write(oprot); + xfer += (*_iter1290).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4718,10 +5585,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraints.size())); - std::vector ::const_iterator _iter1273; - for (_iter1273 = this->checkConstraints.begin(); _iter1273 != this->checkConstraints.end(); ++_iter1273) + std::vector ::const_iterator _iter1291; + for (_iter1291 = this->checkConstraints.begin(); _iter1291 != this->checkConstraints.end(); ++_iter1291) { - xfer += (*_iter1273).write(oprot); + xfer += (*_iter1291).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4749,10 +5616,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1274; - for (_iter1274 = (*(this->primaryKeys)).begin(); _iter1274 != (*(this->primaryKeys)).end(); ++_iter1274) + std::vector ::const_iterator _iter1292; + for (_iter1292 = (*(this->primaryKeys)).begin(); _iter1292 != (*(this->primaryKeys)).end(); ++_iter1292) { - xfer += (*_iter1274).write(oprot); + xfer += (*_iter1292).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4761,10 +5628,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1275; - for (_iter1275 = (*(this->foreignKeys)).begin(); _iter1275 != (*(this->foreignKeys)).end(); ++_iter1275) + std::vector ::const_iterator _iter1293; + for (_iter1293 = (*(this->foreignKeys)).begin(); _iter1293 != (*(this->foreignKeys)).end(); ++_iter1293) { - xfer += 
(*_iter1275).write(oprot); + xfer += (*_iter1293).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4773,10 +5640,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1276; - for (_iter1276 = (*(this->uniqueConstraints)).begin(); _iter1276 != (*(this->uniqueConstraints)).end(); ++_iter1276) + std::vector ::const_iterator _iter1294; + for (_iter1294 = (*(this->uniqueConstraints)).begin(); _iter1294 != (*(this->uniqueConstraints)).end(); ++_iter1294) { - xfer += (*_iter1276).write(oprot); + xfer += (*_iter1294).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4785,10 +5652,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1277; - for (_iter1277 = (*(this->notNullConstraints)).begin(); _iter1277 != (*(this->notNullConstraints)).end(); ++_iter1277) + std::vector ::const_iterator _iter1295; + for (_iter1295 = (*(this->notNullConstraints)).begin(); _iter1295 != (*(this->notNullConstraints)).end(); ++_iter1295) { - xfer += (*_iter1277).write(oprot); + xfer += (*_iter1295).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4797,10 +5664,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->defaultConstraints)).size())); - std::vector ::const_iterator _iter1278; - for (_iter1278 = (*(this->defaultConstraints)).begin(); _iter1278 != (*(this->defaultConstraints)).end(); ++_iter1278) + std::vector ::const_iterator _iter1296; + for (_iter1296 = (*(this->defaultConstraints)).begin(); _iter1296 != (*(this->defaultConstraints)).end(); ++_iter1296) { - xfer += (*_iter1278).write(oprot); + xfer += (*_iter1296).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4809,10 +5676,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->checkConstraints)).size())); - std::vector ::const_iterator _iter1279; - for (_iter1279 = (*(this->checkConstraints)).begin(); _iter1279 != (*(this->checkConstraints)).end(); ++_iter1279) + std::vector ::const_iterator _iter1297; + for (_iter1297 = (*(this->checkConstraints)).begin(); _iter1297 != (*(this->checkConstraints)).end(); ++_iter1297) { - xfer += (*_iter1279).write(oprot); + xfer += (*_iter1297).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6980,14 +7847,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - this->partNames.resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1298; + 
::apache::thrift::protocol::TType _etype1301; + xfer += iprot->readListBegin(_etype1301, _size1298); + this->partNames.resize(_size1298); + uint32_t _i1302; + for (_i1302 = 0; _i1302 < _size1298; ++_i1302) { - xfer += iprot->readString(this->partNames[_i1284]); + xfer += iprot->readString(this->partNames[_i1302]); } xfer += iprot->readListEnd(); } @@ -7024,10 +7891,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size())); - std::vector<std::string>::const_iterator _iter1285; - for (_iter1285 = this->partNames.begin(); _iter1285 != this->partNames.end(); ++_iter1285) + std::vector<std::string>::const_iterator _iter1303; + for (_iter1303 = this->partNames.begin(); _iter1303 != this->partNames.end(); ++_iter1303) { - xfer += oprot->writeString((*_iter1285)); + xfer += oprot->writeString((*_iter1303)); } xfer += oprot->writeListEnd(); } @@ -7059,10 +7926,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size())); - std::vector<std::string>::const_iterator _iter1286; - for (_iter1286 = (*(this->partNames)).begin(); _iter1286 != (*(this->partNames)).end(); ++_iter1286) + std::vector<std::string>::const_iterator _iter1304; + for (_iter1304 = (*(this->partNames)).begin(); _iter1304 != (*(this->partNames)).end(); ++_iter1304) { - xfer += oprot->writeString((*_iter1286)); + xfer += oprot->writeString((*_iter1304)); } xfer += oprot->writeListEnd(); } @@ -7306,14 +8173,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1287; - ::apache::thrift::protocol::TType _etype1290; - xfer += iprot->readListBegin(_etype1290, _size1287); - this->success.resize(_size1287); - uint32_t _i1291; - for (_i1291 = 0; _i1291 < _size1287; ++_i1291) + uint32_t _size1305; + ::apache::thrift::protocol::TType _etype1308; + xfer += iprot->readListBegin(_etype1308, _size1305); + this->success.resize(_size1305); + uint32_t _i1309; + for (_i1309 = 0; _i1309 < _size1305; ++_i1309) { - xfer += iprot->readString(this->success[_i1291]); + xfer += iprot->readString(this->success[_i1309]); } xfer += iprot->readListEnd(); } @@ -7352,10 +8219,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string>::const_iterator _iter1292; - for (_iter1292 = this->success.begin(); _iter1292 != this->success.end(); ++_iter1292) + std::vector<std::string>::const_iterator _iter1310; + for (_iter1310 = this->success.begin(); _iter1310 != this->success.end(); ++_iter1310) { - xfer += oprot->writeString((*_iter1292)); + xfer += oprot->writeString((*_iter1310)); } xfer += oprot->writeListEnd(); } @@ -7400,14 +8267,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1293; - ::apache::thrift::protocol::TType _etype1296; - xfer += iprot->readListBegin(_etype1296, _size1293); - 
(*(this->success)).resize(_size1293); - uint32_t _i1297; - for (_i1297 = 0; _i1297 < _size1293; ++_i1297) + uint32_t _size1311; + ::apache::thrift::protocol::TType _etype1314; + xfer += iprot->readListBegin(_etype1314, _size1311); + (*(this->success)).resize(_size1311); + uint32_t _i1315; + for (_i1315 = 0; _i1315 < _size1311; ++_i1315) { - xfer += iprot->readString((*(this->success))[_i1297]); + xfer += iprot->readString((*(this->success))[_i1315]); } xfer += iprot->readListEnd(); } @@ -7577,14 +8444,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1298; - ::apache::thrift::protocol::TType _etype1301; - xfer += iprot->readListBegin(_etype1301, _size1298); - this->success.resize(_size1298); - uint32_t _i1302; - for (_i1302 = 0; _i1302 < _size1298; ++_i1302) + uint32_t _size1316; + ::apache::thrift::protocol::TType _etype1319; + xfer += iprot->readListBegin(_etype1319, _size1316); + this->success.resize(_size1316); + uint32_t _i1320; + for (_i1320 = 0; _i1320 < _size1316; ++_i1320) { - xfer += iprot->readString(this->success[_i1302]); + xfer += iprot->readString(this->success[_i1320]); } xfer += iprot->readListEnd(); } @@ -7623,10 +8490,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1303; - for (_iter1303 = this->success.begin(); _iter1303 != this->success.end(); ++_iter1303) + std::vector ::const_iterator _iter1321; + for (_iter1321 = this->success.begin(); _iter1321 != this->success.end(); ++_iter1321) { - xfer += oprot->writeString((*_iter1303)); + xfer += oprot->writeString((*_iter1321)); } xfer += oprot->writeListEnd(); } @@ -7671,14 +8538,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1304; - ::apache::thrift::protocol::TType _etype1307; - xfer += iprot->readListBegin(_etype1307, _size1304); - (*(this->success)).resize(_size1304); - uint32_t _i1308; - for (_i1308 = 0; _i1308 < _size1304; ++_i1308) + uint32_t _size1322; + ::apache::thrift::protocol::TType _etype1325; + xfer += iprot->readListBegin(_etype1325, _size1322); + (*(this->success)).resize(_size1322); + uint32_t _i1326; + for (_i1326 = 0; _i1326 < _size1322; ++_i1326) { - xfer += iprot->readString((*(this->success))[_i1308]); + xfer += iprot->readString((*(this->success))[_i1326]); } xfer += iprot->readListEnd(); } @@ -7816,305 +8683,6 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1309; - ::apache::thrift::protocol::TType _etype1312; - xfer += iprot->readListBegin(_etype1312, _size1309); - this->success.resize(_size1309); - uint32_t _i1313; - for (_i1313 = 0; _i1313 < _size1309; ++_i1313) - { - xfer += iprot->readString(this->success[_i1313]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += 
iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_materialized_views_for_rewriting_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1314; - for (_iter1314 = this->success.begin(); _iter1314 != this->success.end(); ++_iter1314) - { - xfer += oprot->writeString((*_iter1314)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::~ThriftHiveMetastore_get_materialized_views_for_rewriting_presult() throw() { -} - - -uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1315; - ::apache::thrift::protocol::TType _etype1318; - xfer += iprot->readListBegin(_etype1318, _size1315); - (*(this->success)).resize(_size1315); - uint32_t _i1319; - for (_i1319 = 0; _i1319 < _size1315; ++_i1319) - { - xfer += iprot->readString((*(this->success))[_i1319]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - - -ThriftHiveMetastore_get_table_meta_args::~ThriftHiveMetastore_get_table_meta_args() throw() { -} - - -uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_patterns); - this->__isset.db_patterns = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 
2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_patterns); - this->__isset.tbl_patterns = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->tbl_types.clear(); - uint32_t _size1320; - ::apache::thrift::protocol::TType _etype1323; - xfer += iprot->readListBegin(_etype1323, _size1320); - this->tbl_types.resize(_size1320); - uint32_t _i1324; - for (_i1324 = 0; _i1324 < _size1320; ++_i1324) - { - xfer += iprot->readString(this->tbl_types[_i1324]); - } - xfer += iprot->readListEnd(); - } - this->__isset.tbl_types = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_args"); - - xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_patterns); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_patterns); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1325; - for (_iter1325 = this->tbl_types.begin(); _iter1325 != this->tbl_types.end(); ++_iter1325) - { - xfer += oprot->writeString((*_iter1325)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_table_meta_pargs::~ThriftHiveMetastore_get_table_meta_pargs() throw() { -} - - -uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { - uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_pargs"); - - xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_patterns))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_patterns))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1326; - for (_iter1326 = (*(this->tbl_types)).begin(); _iter1326 != (*(this->tbl_types)).end(); ++_iter1326) - { - xfer += oprot->writeString((*_iter1326)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - - -ThriftHiveMetastore_get_table_meta_result::~ThriftHiveMetastore_get_table_meta_result() throw() { -} - - -uint32_t 
ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::protocol::TProtocol* iprot) { - - apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); uint32_t _size1327; ::apache::thrift::protocol::TType _etype1330; xfer += iprot->readListBegin(_etype1330, _size1327); @@ -8122,7 +8690,7 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto uint32_t _i1331; for (_i1331 = 0; _i1331 < _size1327; ++_i1331) { - xfer += this->success[_i1331].read(iprot); + xfer += iprot->readString(this->success[_i1331]); } xfer += iprot->readListEnd(); } @@ -8151,20 +8719,20 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto return xfer; } -uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_materialized_views_for_rewriting_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<TableMeta>::const_iterator _iter1332; + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string>::const_iterator _iter1332; for (_iter1332 = this->success.begin(); _iter1332 != this->success.end(); ++_iter1332) { - xfer += (*_iter1332).write(oprot); + xfer += oprot->writeString((*_iter1332)); } xfer += oprot->writeListEnd(); } @@ -8180,11 +8748,11 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot } -ThriftHiveMetastore_get_table_meta_presult::~ThriftHiveMetastore_get_table_meta_presult() throw() { +ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::~ThriftHiveMetastore_get_materialized_views_for_rewriting_presult() throw() { } -uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -8216,7 +8784,306 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot uint32_t _i1337; for (_i1337 = 0; _i1337 < _size1333; ++_i1337) { - xfer += (*(this->success))[_i1337].read(iprot); + xfer += iprot->readString((*(this->success))[_i1337]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += 
iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_table_meta_args::~ThriftHiveMetastore_get_table_meta_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_patterns); + this->__isset.db_patterns = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_patterns); + this->__isset.tbl_patterns = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->tbl_types.clear(); + uint32_t _size1338; + ::apache::thrift::protocol::TType _etype1341; + xfer += iprot->readListBegin(_etype1341, _size1338); + this->tbl_types.resize(_size1338); + uint32_t _i1342; + for (_i1342 = 0; _i1342 < _size1338; ++_i1342) + { + xfer += iprot->readString(this->tbl_types[_i1342]); + } + xfer += iprot->readListEnd(); + } + this->__isset.tbl_types = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_args"); + + xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_patterns); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_patterns); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); + std::vector ::const_iterator _iter1343; + for (_iter1343 = this->tbl_types.begin(); _iter1343 != this->tbl_types.end(); ++_iter1343) + { + xfer += oprot->writeString((*_iter1343)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_table_meta_pargs::~ThriftHiveMetastore_get_table_meta_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_pargs"); + + xfer += oprot->writeFieldBegin("db_patterns", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_patterns))); + xfer += oprot->writeFieldEnd(); + + xfer += 
oprot->writeFieldBegin("tbl_patterns", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_patterns))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); + std::vector ::const_iterator _iter1344; + for (_iter1344 = (*(this->tbl_types)).begin(); _iter1344 != (*(this->tbl_types)).end(); ++_iter1344) + { + xfer += oprot->writeString((*_iter1344)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_table_meta_result::~ThriftHiveMetastore_get_table_meta_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1345; + ::apache::thrift::protocol::TType _etype1348; + xfer += iprot->readListBegin(_etype1348, _size1345); + this->success.resize(_size1345); + uint32_t _i1349; + for (_i1349 = 0; _i1349 < _size1345; ++_i1349) + { + xfer += this->success[_i1349].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_meta_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1350; + for (_iter1350 = this->success.begin(); _iter1350 != this->success.end(); ++_iter1350) + { + xfer += (*_iter1350).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_table_meta_presult::~ThriftHiveMetastore_get_table_meta_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t 
fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1351; + ::apache::thrift::protocol::TType _etype1354; + xfer += iprot->readListBegin(_etype1354, _size1351); + (*(this->success)).resize(_size1351); + uint32_t _i1355; + for (_i1355 = 0; _i1355 < _size1351; ++_i1355) + { + xfer += (*(this->success))[_i1355].read(iprot); } xfer += iprot->readListEnd(); } @@ -8354,14 +9221,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1338; - ::apache::thrift::protocol::TType _etype1341; - xfer += iprot->readListBegin(_etype1341, _size1338); - this->success.resize(_size1338); - uint32_t _i1342; - for (_i1342 = 0; _i1342 < _size1338; ++_i1342) + uint32_t _size1356; + ::apache::thrift::protocol::TType _etype1359; + xfer += iprot->readListBegin(_etype1359, _size1356); + this->success.resize(_size1356); + uint32_t _i1360; + for (_i1360 = 0; _i1360 < _size1356; ++_i1360) { - xfer += iprot->readString(this->success[_i1342]); + xfer += iprot->readString(this->success[_i1360]); } xfer += iprot->readListEnd(); } @@ -8400,10 +9267,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1343; - for (_iter1343 = this->success.begin(); _iter1343 != this->success.end(); ++_iter1343) + std::vector ::const_iterator _iter1361; + for (_iter1361 = this->success.begin(); _iter1361 != this->success.end(); ++_iter1361) { - xfer += oprot->writeString((*_iter1343)); + xfer += oprot->writeString((*_iter1361)); } xfer += oprot->writeListEnd(); } @@ -8448,14 +9315,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1344; - ::apache::thrift::protocol::TType _etype1347; - xfer += iprot->readListBegin(_etype1347, _size1344); - (*(this->success)).resize(_size1344); - uint32_t _i1348; - for (_i1348 = 0; _i1348 < _size1344; ++_i1348) + uint32_t _size1362; + ::apache::thrift::protocol::TType _etype1365; + xfer += iprot->readListBegin(_etype1365, _size1362); + (*(this->success)).resize(_size1362); + uint32_t _i1366; + for (_i1366 = 0; _i1366 < _size1362; ++_i1366) { - xfer += iprot->readString((*(this->success))[_i1348]); + xfer += iprot->readString((*(this->success))[_i1366]); } xfer += iprot->readListEnd(); } @@ -8765,14 +9632,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1349; - ::apache::thrift::protocol::TType _etype1352; - xfer += iprot->readListBegin(_etype1352, _size1349); - this->tbl_names.resize(_size1349); - uint32_t _i1353; - for (_i1353 = 0; _i1353 < _size1349; ++_i1353) + uint32_t _size1367; + ::apache::thrift::protocol::TType _etype1370; + xfer += iprot->readListBegin(_etype1370, _size1367); + this->tbl_names.resize(_size1367); + uint32_t _i1371; + for (_i1371 = 
0; _i1371 < _size1367; ++_i1371) { - xfer += iprot->readString(this->tbl_names[_i1353]); + xfer += iprot->readString(this->tbl_names[_i1371]); } xfer += iprot->readListEnd(); } @@ -8805,10 +9672,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1354; - for (_iter1354 = this->tbl_names.begin(); _iter1354 != this->tbl_names.end(); ++_iter1354) + std::vector ::const_iterator _iter1372; + for (_iter1372 = this->tbl_names.begin(); _iter1372 != this->tbl_names.end(); ++_iter1372) { - xfer += oprot->writeString((*_iter1354)); + xfer += oprot->writeString((*_iter1372)); } xfer += oprot->writeListEnd(); } @@ -8836,10 +9703,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1355; - for (_iter1355 = (*(this->tbl_names)).begin(); _iter1355 != (*(this->tbl_names)).end(); ++_iter1355) + std::vector ::const_iterator _iter1373; + for (_iter1373 = (*(this->tbl_names)).begin(); _iter1373 != (*(this->tbl_names)).end(); ++_iter1373) { - xfer += oprot->writeString((*_iter1355)); + xfer += oprot->writeString((*_iter1373)); } xfer += oprot->writeListEnd(); } @@ -8880,14 +9747,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1356; - ::apache::thrift::protocol::TType _etype1359; - xfer += iprot->readListBegin(_etype1359, _size1356); - this->success.resize(_size1356); - uint32_t _i1360; - for (_i1360 = 0; _i1360 < _size1356; ++_i1360) + uint32_t _size1374; + ::apache::thrift::protocol::TType _etype1377; + xfer += iprot->readListBegin(_etype1377, _size1374); + this->success.resize(_size1374); + uint32_t _i1378; + for (_i1378 = 0; _i1378 < _size1374; ++_i1378) { - xfer += this->success[_i1360].read(iprot); + xfer += this->success[_i1378].read(iprot); } xfer += iprot->readListEnd(); } @@ -8918,10 +9785,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector
<Table>::const_iterator _iter1361; - for (_iter1361 = this->success.begin(); _iter1361 != this->success.end(); ++_iter1361) + std::vector<Table>
::const_iterator _iter1379; + for (_iter1379 = this->success.begin(); _iter1379 != this->success.end(); ++_iter1379) { - xfer += (*_iter1361).write(oprot); + xfer += (*_iter1379).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8962,14 +9829,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1362; - ::apache::thrift::protocol::TType _etype1365; - xfer += iprot->readListBegin(_etype1365, _size1362); - (*(this->success)).resize(_size1362); - uint32_t _i1366; - for (_i1366 = 0; _i1366 < _size1362; ++_i1366) + uint32_t _size1380; + ::apache::thrift::protocol::TType _etype1383; + xfer += iprot->readListBegin(_etype1383, _size1380); + (*(this->success)).resize(_size1380); + uint32_t _i1384; + for (_i1384 = 0; _i1384 < _size1380; ++_i1384) { - xfer += (*(this->success))[_i1366].read(iprot); + xfer += (*(this->success))[_i1384].read(iprot); } xfer += iprot->readListEnd(); } @@ -9502,14 +10369,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1367; - ::apache::thrift::protocol::TType _etype1370; - xfer += iprot->readListBegin(_etype1370, _size1367); - this->tbl_names.resize(_size1367); - uint32_t _i1371; - for (_i1371 = 0; _i1371 < _size1367; ++_i1371) + uint32_t _size1385; + ::apache::thrift::protocol::TType _etype1388; + xfer += iprot->readListBegin(_etype1388, _size1385); + this->tbl_names.resize(_size1385); + uint32_t _i1389; + for (_i1389 = 0; _i1389 < _size1385; ++_i1389) { - xfer += iprot->readString(this->tbl_names[_i1371]); + xfer += iprot->readString(this->tbl_names[_i1389]); } xfer += iprot->readListEnd(); } @@ -9542,10 +10409,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1372; - for (_iter1372 = this->tbl_names.begin(); _iter1372 != this->tbl_names.end(); ++_iter1372) + std::vector ::const_iterator _iter1390; + for (_iter1390 = this->tbl_names.begin(); _iter1390 != this->tbl_names.end(); ++_iter1390) { - xfer += oprot->writeString((*_iter1372)); + xfer += oprot->writeString((*_iter1390)); } xfer += oprot->writeListEnd(); } @@ -9573,10 +10440,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter1373; - for (_iter1373 = (*(this->tbl_names)).begin(); _iter1373 != (*(this->tbl_names)).end(); ++_iter1373) + std::vector ::const_iterator _iter1391; + for (_iter1391 = (*(this->tbl_names)).begin(); _iter1391 != (*(this->tbl_names)).end(); ++_iter1391) { - xfer += oprot->writeString((*_iter1373)); + xfer += oprot->writeString((*_iter1391)); } xfer += oprot->writeListEnd(); } @@ -9617,17 +10484,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1374; - ::apache::thrift::protocol::TType _ktype1375; - ::apache::thrift::protocol::TType _vtype1376; - xfer += 
iprot->readMapBegin(_ktype1375, _vtype1376, _size1374); - uint32_t _i1378; - for (_i1378 = 0; _i1378 < _size1374; ++_i1378) + uint32_t _size1392; + ::apache::thrift::protocol::TType _ktype1393; + ::apache::thrift::protocol::TType _vtype1394; + xfer += iprot->readMapBegin(_ktype1393, _vtype1394, _size1392); + uint32_t _i1396; + for (_i1396 = 0; _i1396 < _size1392; ++_i1396) { - std::string _key1379; - xfer += iprot->readString(_key1379); - Materialization& _val1380 = this->success[_key1379]; - xfer += _val1380.read(iprot); + std::string _key1397; + xfer += iprot->readString(_key1397); + Materialization& _val1398 = this->success[_key1397]; + xfer += _val1398.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9682,11 +10549,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::map<std::string, Materialization>::const_iterator _iter1381; - for (_iter1381 = this->success.begin(); _iter1381 != this->success.end(); ++_iter1381) + std::map<std::string, Materialization>::const_iterator _iter1399; + for (_iter1399 = this->success.begin(); _iter1399 != this->success.end(); ++_iter1399) { - xfer += oprot->writeString(_iter1381->first); - xfer += _iter1381->second.write(oprot); + xfer += oprot->writeString(_iter1399->first); + xfer += _iter1399->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -9739,17 +10606,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1382; - ::apache::thrift::protocol::TType _ktype1383; - ::apache::thrift::protocol::TType _vtype1384; - xfer += iprot->readMapBegin(_ktype1383, _vtype1384, _size1382); - uint32_t _i1386; - for (_i1386 = 0; _i1386 < _size1382; ++_i1386) + uint32_t _size1400; + ::apache::thrift::protocol::TType _ktype1401; + ::apache::thrift::protocol::TType _vtype1402; + xfer += iprot->readMapBegin(_ktype1401, _vtype1402, _size1400); + uint32_t _i1404; + for (_i1404 = 0; _i1404 < _size1400; ++_i1404) { - std::string _key1387; - xfer += iprot->readString(_key1387); - Materialization& _val1388 = (*(this->success))[_key1387]; - xfer += _val1388.read(iprot); + std::string _key1405; + xfer += iprot->readString(_key1405); + Materialization& _val1406 = (*(this->success))[_key1405]; + xfer += _val1406.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9822,13 +10689,21 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrif { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbname); this->__isset.dbname = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); this->__isset.tbl_name = true; @@ -9836,7 +10711,7 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrif xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->creation_metadata.read(iprot); this->__isset.creation_metadata = true; @@ -9861,15 +10736,19 @@ uint32_t 
ThriftHiveMetastore_update_creation_metadata_args::write(::apache::thri apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_args"); - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbname); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 4); xfer += this->creation_metadata.write(oprot); xfer += oprot->writeFieldEnd(); @@ -9888,15 +10767,19 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_pargs::write(::apache::thr apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_pargs"); - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->catName))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString((*(this->dbname))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 4); xfer += (*(this->creation_metadata)).write(oprot); xfer += oprot->writeFieldEnd(); @@ -10194,14 +11077,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1389; - ::apache::thrift::protocol::TType _etype1392; - xfer += iprot->readListBegin(_etype1392, _size1389); - this->success.resize(_size1389); - uint32_t _i1393; - for (_i1393 = 0; _i1393 < _size1389; ++_i1393) + uint32_t _size1407; + ::apache::thrift::protocol::TType _etype1410; + xfer += iprot->readListBegin(_etype1410, _size1407); + this->success.resize(_size1407); + uint32_t _i1411; + for (_i1411 = 0; _i1411 < _size1407; ++_i1411) { - xfer += iprot->readString(this->success[_i1393]); + xfer += iprot->readString(this->success[_i1411]); } xfer += iprot->readListEnd(); } @@ -10256,10 +11139,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1394; - for (_iter1394 = this->success.begin(); _iter1394 != 
this->success.end(); ++_iter1394) + std::vector ::const_iterator _iter1412; + for (_iter1412 = this->success.begin(); _iter1412 != this->success.end(); ++_iter1412) { - xfer += oprot->writeString((*_iter1394)); + xfer += oprot->writeString((*_iter1412)); } xfer += oprot->writeListEnd(); } @@ -10312,14 +11195,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1395; - ::apache::thrift::protocol::TType _etype1398; - xfer += iprot->readListBegin(_etype1398, _size1395); - (*(this->success)).resize(_size1395); - uint32_t _i1399; - for (_i1399 = 0; _i1399 < _size1395; ++_i1399) + uint32_t _size1413; + ::apache::thrift::protocol::TType _etype1416; + xfer += iprot->readListBegin(_etype1416, _size1413); + (*(this->success)).resize(_size1413); + uint32_t _i1417; + for (_i1417 = 0; _i1417 < _size1413; ++_i1417) { - xfer += iprot->readString((*(this->success))[_i1399]); + xfer += iprot->readString((*(this->success))[_i1417]); } xfer += iprot->readListEnd(); } @@ -11653,14 +12536,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1400; - ::apache::thrift::protocol::TType _etype1403; - xfer += iprot->readListBegin(_etype1403, _size1400); - this->new_parts.resize(_size1400); - uint32_t _i1404; - for (_i1404 = 0; _i1404 < _size1400; ++_i1404) + uint32_t _size1418; + ::apache::thrift::protocol::TType _etype1421; + xfer += iprot->readListBegin(_etype1421, _size1418); + this->new_parts.resize(_size1418); + uint32_t _i1422; + for (_i1422 = 0; _i1422 < _size1418; ++_i1422) { - xfer += this->new_parts[_i1404].read(iprot); + xfer += this->new_parts[_i1422].read(iprot); } xfer += iprot->readListEnd(); } @@ -11689,10 +12572,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1405; - for (_iter1405 = this->new_parts.begin(); _iter1405 != this->new_parts.end(); ++_iter1405) + std::vector ::const_iterator _iter1423; + for (_iter1423 = this->new_parts.begin(); _iter1423 != this->new_parts.end(); ++_iter1423) { - xfer += (*_iter1405).write(oprot); + xfer += (*_iter1423).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11716,10 +12599,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1406; - for (_iter1406 = (*(this->new_parts)).begin(); _iter1406 != (*(this->new_parts)).end(); ++_iter1406) + std::vector ::const_iterator _iter1424; + for (_iter1424 = (*(this->new_parts)).begin(); _iter1424 != (*(this->new_parts)).end(); ++_iter1424) { - xfer += (*_iter1406).write(oprot); + xfer += (*_iter1424).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11928,14 +12811,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1407; - ::apache::thrift::protocol::TType _etype1410; - xfer += iprot->readListBegin(_etype1410, 
_size1407); - this->new_parts.resize(_size1407); - uint32_t _i1411; - for (_i1411 = 0; _i1411 < _size1407; ++_i1411) + uint32_t _size1425; + ::apache::thrift::protocol::TType _etype1428; + xfer += iprot->readListBegin(_etype1428, _size1425); + this->new_parts.resize(_size1425); + uint32_t _i1429; + for (_i1429 = 0; _i1429 < _size1425; ++_i1429) { - xfer += this->new_parts[_i1411].read(iprot); + xfer += this->new_parts[_i1429].read(iprot); } xfer += iprot->readListEnd(); } @@ -11964,10 +12847,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1412; - for (_iter1412 = this->new_parts.begin(); _iter1412 != this->new_parts.end(); ++_iter1412) + std::vector ::const_iterator _iter1430; + for (_iter1430 = this->new_parts.begin(); _iter1430 != this->new_parts.end(); ++_iter1430) { - xfer += (*_iter1412).write(oprot); + xfer += (*_iter1430).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11991,10 +12874,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1413; - for (_iter1413 = (*(this->new_parts)).begin(); _iter1413 != (*(this->new_parts)).end(); ++_iter1413) + std::vector ::const_iterator _iter1431; + for (_iter1431 = (*(this->new_parts)).begin(); _iter1431 != (*(this->new_parts)).end(); ++_iter1431) { - xfer += (*_iter1413).write(oprot); + xfer += (*_iter1431).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12219,14 +13102,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1414; - ::apache::thrift::protocol::TType _etype1417; - xfer += iprot->readListBegin(_etype1417, _size1414); - this->part_vals.resize(_size1414); - uint32_t _i1418; - for (_i1418 = 0; _i1418 < _size1414; ++_i1418) + uint32_t _size1432; + ::apache::thrift::protocol::TType _etype1435; + xfer += iprot->readListBegin(_etype1435, _size1432); + this->part_vals.resize(_size1432); + uint32_t _i1436; + for (_i1436 = 0; _i1436 < _size1432; ++_i1436) { - xfer += iprot->readString(this->part_vals[_i1418]); + xfer += iprot->readString(this->part_vals[_i1436]); } xfer += iprot->readListEnd(); } @@ -12263,10 +13146,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1419; - for (_iter1419 = this->part_vals.begin(); _iter1419 != this->part_vals.end(); ++_iter1419) + std::vector ::const_iterator _iter1437; + for (_iter1437 = this->part_vals.begin(); _iter1437 != this->part_vals.end(); ++_iter1437) { - xfer += oprot->writeString((*_iter1419)); + xfer += oprot->writeString((*_iter1437)); } xfer += oprot->writeListEnd(); } @@ -12298,10 +13181,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1420; - for (_iter1420 = (*(this->part_vals)).begin(); _iter1420 != (*(this->part_vals)).end(); ++_iter1420) + std::vector ::const_iterator _iter1438; + for (_iter1438 = (*(this->part_vals)).begin(); _iter1438 != (*(this->part_vals)).end(); ++_iter1438) { - xfer += oprot->writeString((*_iter1420)); + xfer += oprot->writeString((*_iter1438)); } xfer += oprot->writeListEnd(); } @@ -12773,14 +13656,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1421; - ::apache::thrift::protocol::TType _etype1424; - xfer += iprot->readListBegin(_etype1424, _size1421); - this->part_vals.resize(_size1421); - uint32_t _i1425; - for (_i1425 = 0; _i1425 < _size1421; ++_i1425) + uint32_t _size1439; + ::apache::thrift::protocol::TType _etype1442; + xfer += iprot->readListBegin(_etype1442, _size1439); + this->part_vals.resize(_size1439); + uint32_t _i1443; + for (_i1443 = 0; _i1443 < _size1439; ++_i1443) { - xfer += iprot->readString(this->part_vals[_i1425]); + xfer += iprot->readString(this->part_vals[_i1443]); } xfer += iprot->readListEnd(); } @@ -12825,10 +13708,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1426; - for (_iter1426 = this->part_vals.begin(); _iter1426 != this->part_vals.end(); ++_iter1426) + std::vector ::const_iterator _iter1444; + for (_iter1444 = this->part_vals.begin(); _iter1444 != this->part_vals.end(); ++_iter1444) { - xfer += oprot->writeString((*_iter1426)); + xfer += oprot->writeString((*_iter1444)); } xfer += oprot->writeListEnd(); } @@ -12864,10 +13747,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1427; - for (_iter1427 = (*(this->part_vals)).begin(); _iter1427 != (*(this->part_vals)).end(); ++_iter1427) + std::vector ::const_iterator _iter1445; + for (_iter1445 = (*(this->part_vals)).begin(); _iter1445 != (*(this->part_vals)).end(); ++_iter1445) { - xfer += oprot->writeString((*_iter1427)); + xfer += oprot->writeString((*_iter1445)); } xfer += oprot->writeListEnd(); } @@ -13670,14 +14553,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1428; - ::apache::thrift::protocol::TType _etype1431; - xfer += iprot->readListBegin(_etype1431, _size1428); - this->part_vals.resize(_size1428); - uint32_t _i1432; - for (_i1432 = 0; _i1432 < _size1428; ++_i1432) + uint32_t _size1446; + ::apache::thrift::protocol::TType _etype1449; + xfer += iprot->readListBegin(_etype1449, _size1446); + this->part_vals.resize(_size1446); + uint32_t _i1450; + for (_i1450 = 0; _i1450 < _size1446; ++_i1450) { - xfer += iprot->readString(this->part_vals[_i1432]); + xfer += iprot->readString(this->part_vals[_i1450]); } xfer += iprot->readListEnd(); } @@ -13722,10 +14605,10 @@ uint32_t 
ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1433; - for (_iter1433 = this->part_vals.begin(); _iter1433 != this->part_vals.end(); ++_iter1433) + std::vector ::const_iterator _iter1451; + for (_iter1451 = this->part_vals.begin(); _iter1451 != this->part_vals.end(); ++_iter1451) { - xfer += oprot->writeString((*_iter1433)); + xfer += oprot->writeString((*_iter1451)); } xfer += oprot->writeListEnd(); } @@ -13761,10 +14644,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1434; - for (_iter1434 = (*(this->part_vals)).begin(); _iter1434 != (*(this->part_vals)).end(); ++_iter1434) + std::vector ::const_iterator _iter1452; + for (_iter1452 = (*(this->part_vals)).begin(); _iter1452 != (*(this->part_vals)).end(); ++_iter1452) { - xfer += oprot->writeString((*_iter1434)); + xfer += oprot->writeString((*_iter1452)); } xfer += oprot->writeListEnd(); } @@ -13973,14 +14856,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1435; - ::apache::thrift::protocol::TType _etype1438; - xfer += iprot->readListBegin(_etype1438, _size1435); - this->part_vals.resize(_size1435); - uint32_t _i1439; - for (_i1439 = 0; _i1439 < _size1435; ++_i1439) + uint32_t _size1453; + ::apache::thrift::protocol::TType _etype1456; + xfer += iprot->readListBegin(_etype1456, _size1453); + this->part_vals.resize(_size1453); + uint32_t _i1457; + for (_i1457 = 0; _i1457 < _size1453; ++_i1457) { - xfer += iprot->readString(this->part_vals[_i1439]); + xfer += iprot->readString(this->part_vals[_i1457]); } xfer += iprot->readListEnd(); } @@ -14033,10 +14916,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1440; - for (_iter1440 = this->part_vals.begin(); _iter1440 != this->part_vals.end(); ++_iter1440) + std::vector ::const_iterator _iter1458; + for (_iter1458 = this->part_vals.begin(); _iter1458 != this->part_vals.end(); ++_iter1458) { - xfer += oprot->writeString((*_iter1440)); + xfer += oprot->writeString((*_iter1458)); } xfer += oprot->writeListEnd(); } @@ -14076,10 +14959,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1441; - for (_iter1441 = (*(this->part_vals)).begin(); _iter1441 != (*(this->part_vals)).end(); ++_iter1441) + std::vector ::const_iterator _iter1459; + for (_iter1459 = (*(this->part_vals)).begin(); _iter1459 != (*(this->part_vals)).end(); ++_iter1459) { - xfer += oprot->writeString((*_iter1441)); + xfer += oprot->writeString((*_iter1459)); } xfer += 
@@ -15085,14 +15968,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1442;
-    ::apache::thrift::protocol::TType _etype1445;
-    xfer += iprot->readListBegin(_etype1445, _size1442);
-    this->part_vals.resize(_size1442);
-    uint32_t _i1446;
-    for (_i1446 = 0; _i1446 < _size1442; ++_i1446)
+    uint32_t _size1460;
+    ::apache::thrift::protocol::TType _etype1463;
+    xfer += iprot->readListBegin(_etype1463, _size1460);
+    this->part_vals.resize(_size1460);
+    uint32_t _i1464;
+    for (_i1464 = 0; _i1464 < _size1460; ++_i1464)
     {
-      xfer += iprot->readString(this->part_vals[_i1446]);
+      xfer += iprot->readString(this->part_vals[_i1464]);
     }
     xfer += iprot->readListEnd();
   }
@@ -15129,10 +16012,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1447;
-  for (_iter1447 = this->part_vals.begin(); _iter1447 != this->part_vals.end(); ++_iter1447)
+  std::vector<std::string> ::const_iterator _iter1465;
+  for (_iter1465 = this->part_vals.begin(); _iter1465 != this->part_vals.end(); ++_iter1465)
   {
-    xfer += oprot->writeString((*_iter1447));
+    xfer += oprot->writeString((*_iter1465));
   }
   xfer += oprot->writeListEnd();
 }
@@ -15164,10 +16047,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1448;
-  for (_iter1448 = (*(this->part_vals)).begin(); _iter1448 != (*(this->part_vals)).end(); ++_iter1448)
+  std::vector<std::string> ::const_iterator _iter1466;
+  for (_iter1466 = (*(this->part_vals)).begin(); _iter1466 != (*(this->part_vals)).end(); ++_iter1466)
   {
-    xfer += oprot->writeString((*_iter1448));
+    xfer += oprot->writeString((*_iter1466));
   }
   xfer += oprot->writeListEnd();
 }
@@ -15356,17 +16239,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
 if (ftype == ::apache::thrift::protocol::T_MAP) {
   {
     this->partitionSpecs.clear();
-    uint32_t _size1449;
-    ::apache::thrift::protocol::TType _ktype1450;
-    ::apache::thrift::protocol::TType _vtype1451;
-    xfer += iprot->readMapBegin(_ktype1450, _vtype1451, _size1449);
-    uint32_t _i1453;
-    for (_i1453 = 0; _i1453 < _size1449; ++_i1453)
+    uint32_t _size1467;
+    ::apache::thrift::protocol::TType _ktype1468;
+    ::apache::thrift::protocol::TType _vtype1469;
+    xfer += iprot->readMapBegin(_ktype1468, _vtype1469, _size1467);
+    uint32_t _i1471;
+    for (_i1471 = 0; _i1471 < _size1467; ++_i1471)
     {
-      std::string _key1454;
-      xfer += iprot->readString(_key1454);
-      std::string& _val1455 = this->partitionSpecs[_key1454];
-      xfer += iprot->readString(_val1455);
+      std::string _key1472;
+      xfer += iprot->readString(_key1472);
+      std::string& _val1473 = this->partitionSpecs[_key1472];
+      xfer += iprot->readString(_val1473);
     }
     xfer += iprot->readMapEnd();
   }
@@ -15427,11 +16310,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
 xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
 {
   xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-  std::map<std::string, std::string> ::const_iterator _iter1456;
-  for (_iter1456 = this->partitionSpecs.begin(); _iter1456 != this->partitionSpecs.end(); ++_iter1456)
+  std::map<std::string, std::string> ::const_iterator _iter1474;
+  for (_iter1474 = this->partitionSpecs.begin(); _iter1474 != this->partitionSpecs.end(); ++_iter1474)
   {
-    xfer += oprot->writeString(_iter1456->first);
-    xfer += oprot->writeString(_iter1456->second);
+    xfer += oprot->writeString(_iter1474->first);
+    xfer += oprot->writeString(_iter1474->second);
   }
   xfer += oprot->writeMapEnd();
 }
@@ -15471,11 +16354,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
 {
   xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-  std::map<std::string, std::string> ::const_iterator _iter1457;
-  for (_iter1457 = (*(this->partitionSpecs)).begin(); _iter1457 != (*(this->partitionSpecs)).end(); ++_iter1457)
+  std::map<std::string, std::string> ::const_iterator _iter1475;
+  for (_iter1475 = (*(this->partitionSpecs)).begin(); _iter1475 != (*(this->partitionSpecs)).end(); ++_iter1475)
   {
-    xfer += oprot->writeString(_iter1457->first);
-    xfer += oprot->writeString(_iter1457->second);
+    xfer += oprot->writeString(_iter1475->first);
+    xfer += oprot->writeString(_iter1475->second);
   }
   xfer += oprot->writeMapEnd();
 }
@@ -15720,17 +16603,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_MAP) {
   {
     this->partitionSpecs.clear();
-    uint32_t _size1458;
-    ::apache::thrift::protocol::TType _ktype1459;
-    ::apache::thrift::protocol::TType _vtype1460;
-    xfer += iprot->readMapBegin(_ktype1459, _vtype1460, _size1458);
-    uint32_t _i1462;
-    for (_i1462 = 0; _i1462 < _size1458; ++_i1462)
+    uint32_t _size1476;
+    ::apache::thrift::protocol::TType _ktype1477;
+    ::apache::thrift::protocol::TType _vtype1478;
+    xfer += iprot->readMapBegin(_ktype1477, _vtype1478, _size1476);
+    uint32_t _i1480;
+    for (_i1480 = 0; _i1480 < _size1476; ++_i1480)
     {
-      std::string _key1463;
-      xfer += iprot->readString(_key1463);
-      std::string& _val1464 = this->partitionSpecs[_key1463];
-      xfer += iprot->readString(_val1464);
+      std::string _key1481;
+      xfer += iprot->readString(_key1481);
+      std::string& _val1482 = this->partitionSpecs[_key1481];
+      xfer += iprot->readString(_val1482);
    }
     xfer += iprot->readMapEnd();
   }
@@ -15791,11 +16674,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
 {
   xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-  std::map<std::string, std::string> ::const_iterator _iter1465;
-  for (_iter1465 = this->partitionSpecs.begin(); _iter1465 != this->partitionSpecs.end(); ++_iter1465)
+  std::map<std::string, std::string> ::const_iterator _iter1483;
+  for (_iter1483 = this->partitionSpecs.begin(); _iter1483 != this->partitionSpecs.end(); ++_iter1483)
   {
-    xfer += oprot->writeString(_iter1465->first);
-    xfer += oprot->writeString(_iter1465->second);
+    xfer += oprot->writeString(_iter1483->first);
+    xfer += oprot->writeString(_iter1483->second);
   }
   xfer += oprot->writeMapEnd();
 }
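The exchange_partition and exchange_partitions hunks apply the same renumbering over map<string,string> arguments. A sketch, again illustrative rather than generated code, of the map read pattern these hunks follow (key and value type tags plus size, then pairwise reads):

#include <map>
#include <string>
#include <thrift/protocol/TProtocol.h>

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;

uint32_t readStringMap(TProtocol* iprot, std::map<std::string, std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  TType ktype, vtype;             // type tags read from the wire
  xfer += iprot->readMapBegin(ktype, vtype, size);
  out.clear();
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;
    xfer += iprot->readString(key);
    std::string& val = out[key];  // insert the key, then read the value in place
    xfer += iprot->readString(val);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}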
@@ -15835,11 +16718,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
 xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
 {
   xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-  std::map<std::string, std::string> ::const_iterator _iter1466;
-  for (_iter1466 = (*(this->partitionSpecs)).begin(); _iter1466 != (*(this->partitionSpecs)).end(); ++_iter1466)
+  std::map<std::string, std::string> ::const_iterator _iter1484;
+  for (_iter1484 = (*(this->partitionSpecs)).begin(); _iter1484 != (*(this->partitionSpecs)).end(); ++_iter1484)
   {
-    xfer += oprot->writeString(_iter1466->first);
-    xfer += oprot->writeString(_iter1466->second);
+    xfer += oprot->writeString(_iter1484->first);
+    xfer += oprot->writeString(_iter1484->second);
   }
   xfer += oprot->writeMapEnd();
 }
@@ -15896,14 +16779,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1467;
-    ::apache::thrift::protocol::TType _etype1470;
-    xfer += iprot->readListBegin(_etype1470, _size1467);
-    this->success.resize(_size1467);
-    uint32_t _i1471;
-    for (_i1471 = 0; _i1471 < _size1467; ++_i1471)
+    uint32_t _size1485;
+    ::apache::thrift::protocol::TType _etype1488;
+    xfer += iprot->readListBegin(_etype1488, _size1485);
+    this->success.resize(_size1485);
+    uint32_t _i1489;
+    for (_i1489 = 0; _i1489 < _size1485; ++_i1489)
     {
-      xfer += this->success[_i1471].read(iprot);
+      xfer += this->success[_i1489].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -15966,10 +16849,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1472;
-  for (_iter1472 = this->success.begin(); _iter1472 != this->success.end(); ++_iter1472)
+  std::vector<Partition> ::const_iterator _iter1490;
+  for (_iter1490 = this->success.begin(); _iter1490 != this->success.end(); ++_iter1490)
   {
-    xfer += (*_iter1472).write(oprot);
+    xfer += (*_iter1490).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -16026,14 +16909,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1473;
-    ::apache::thrift::protocol::TType _etype1476;
-    xfer += iprot->readListBegin(_etype1476, _size1473);
-    (*(this->success)).resize(_size1473);
-    uint32_t _i1477;
-    for (_i1477 = 0; _i1477 < _size1473; ++_i1477)
+    uint32_t _size1491;
+    ::apache::thrift::protocol::TType _etype1494;
+    xfer += iprot->readListBegin(_etype1494, _size1491);
+    (*(this->success)).resize(_size1491);
+    uint32_t _i1495;
+    for (_i1495 = 0; _i1495 < _size1491; ++_i1495)
     {
-      xfer += (*(this->success))[_i1477].read(iprot);
+      xfer += (*(this->success))[_i1495].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
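The *_result::read and *_presult::read hunks in this file deserialize lists of generated structs (Partition here, PartitionSpec further down): the vector is resized up front and each element reads itself from the protocol. A hand-written sketch under those assumptions (TStruct stands in for a generated class; not code from the patch):

#include <vector>
#include <thrift/protocol/TProtocol.h>

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;

// Generated Thrift structs expose uint32_t read(TProtocol*); the template
// parameter stands in for one of them, e.g. Partition.
template <typename TStruct>
uint32_t readStructList(TProtocol* iprot, std::vector<TStruct>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  TType etype;                    // element type tag from the wire
  xfer += iprot->readListBegin(etype, size);
  out.clear();
  out.resize(size);               // default-construct elements, then fill in place
  for (uint32_t i = 0; i < size; ++i) {
    xfer += out[i].read(iprot);
  }
  xfer += iprot->readListEnd();
  return xfer;
}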
@@ -16132,14 +17015,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1478;
-    ::apache::thrift::protocol::TType _etype1481;
-    xfer += iprot->readListBegin(_etype1481, _size1478);
-    this->part_vals.resize(_size1478);
-    uint32_t _i1482;
-    for (_i1482 = 0; _i1482 < _size1478; ++_i1482)
+    uint32_t _size1496;
+    ::apache::thrift::protocol::TType _etype1499;
+    xfer += iprot->readListBegin(_etype1499, _size1496);
+    this->part_vals.resize(_size1496);
+    uint32_t _i1500;
+    for (_i1500 = 0; _i1500 < _size1496; ++_i1500)
     {
-      xfer += iprot->readString(this->part_vals[_i1482]);
+      xfer += iprot->readString(this->part_vals[_i1500]);
    }
     xfer += iprot->readListEnd();
   }
@@ -16160,14 +17043,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->group_names.clear();
-    uint32_t _size1483;
-    ::apache::thrift::protocol::TType _etype1486;
-    xfer += iprot->readListBegin(_etype1486, _size1483);
-    this->group_names.resize(_size1483);
-    uint32_t _i1487;
-    for (_i1487 = 0; _i1487 < _size1483; ++_i1487)
+    uint32_t _size1501;
+    ::apache::thrift::protocol::TType _etype1504;
+    xfer += iprot->readListBegin(_etype1504, _size1501);
+    this->group_names.resize(_size1501);
+    uint32_t _i1505;
+    for (_i1505 = 0; _i1505 < _size1501; ++_i1505)
     {
-      xfer += iprot->readString(this->group_names[_i1487]);
+      xfer += iprot->readString(this->group_names[_i1505]);
    }
     xfer += iprot->readListEnd();
   }
@@ -16204,10 +17087,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1488;
-  for (_iter1488 = this->part_vals.begin(); _iter1488 != this->part_vals.end(); ++_iter1488)
+  std::vector<std::string> ::const_iterator _iter1506;
+  for (_iter1506 = this->part_vals.begin(); _iter1506 != this->part_vals.end(); ++_iter1506)
   {
-    xfer += oprot->writeString((*_iter1488));
+    xfer += oprot->writeString((*_iter1506));
   }
   xfer += oprot->writeListEnd();
 }
@@ -16220,10 +17103,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-  std::vector<std::string> ::const_iterator _iter1489;
-  for (_iter1489 = this->group_names.begin(); _iter1489 != this->group_names.end(); ++_iter1489)
+  std::vector<std::string> ::const_iterator _iter1507;
+  for (_iter1507 = this->group_names.begin(); _iter1507 != this->group_names.end(); ++_iter1507)
   {
-    xfer += oprot->writeString((*_iter1489));
+    xfer += oprot->writeString((*_iter1507));
   }
   xfer += oprot->writeListEnd();
 }
@@ -16255,10 +17138,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1490;
-  for (_iter1490 = (*(this->part_vals)).begin(); _iter1490 != (*(this->part_vals)).end(); ++_iter1490)
+  std::vector<std::string> ::const_iterator _iter1508;
+  for (_iter1508 = (*(this->part_vals)).begin(); _iter1508 != (*(this->part_vals)).end(); ++_iter1508)
   {
-    xfer += oprot->writeString((*_iter1490));
+    xfer += oprot->writeString((*_iter1508));
   }
   xfer += oprot->writeListEnd();
 }
@@ -16271,10 +17154,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-  std::vector<std::string> ::const_iterator _iter1491;
-  for (_iter1491 = (*(this->group_names)).begin(); _iter1491 != (*(this->group_names)).end(); ++_iter1491)
+  std::vector<std::string> ::const_iterator _iter1509;
+  for (_iter1509 = (*(this->group_names)).begin(); _iter1509 != (*(this->group_names)).end(); ++_iter1509)
   {
-    xfer += oprot->writeString((*_iter1491));
+    xfer += oprot->writeString((*_iter1509));
   }
   xfer += oprot->writeListEnd();
 }
@@ -16833,14 +17716,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1492;
-    ::apache::thrift::protocol::TType _etype1495;
-    xfer += iprot->readListBegin(_etype1495, _size1492);
-    this->success.resize(_size1492);
-    uint32_t _i1496;
-    for (_i1496 = 0; _i1496 < _size1492; ++_i1496)
+    uint32_t _size1510;
+    ::apache::thrift::protocol::TType _etype1513;
+    xfer += iprot->readListBegin(_etype1513, _size1510);
+    this->success.resize(_size1510);
+    uint32_t _i1514;
+    for (_i1514 = 0; _i1514 < _size1510; ++_i1514)
     {
-      xfer += this->success[_i1496].read(iprot);
+      xfer += this->success[_i1514].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -16887,10 +17770,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1497;
-  for (_iter1497 = this->success.begin(); _iter1497 != this->success.end(); ++_iter1497)
+  std::vector<Partition> ::const_iterator _iter1515;
+  for (_iter1515 = this->success.begin(); _iter1515 != this->success.end(); ++_iter1515)
   {
-    xfer += (*_iter1497).write(oprot);
+    xfer += (*_iter1515).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -16939,14 +17822,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1498;
-    ::apache::thrift::protocol::TType _etype1501;
-    xfer += iprot->readListBegin(_etype1501, _size1498);
-    (*(this->success)).resize(_size1498);
-    uint32_t _i1502;
-    for (_i1502 = 0; _i1502 < _size1498; ++_i1502)
+    uint32_t _size1516;
+    ::apache::thrift::protocol::TType _etype1519;
+    xfer += iprot->readListBegin(_etype1519, _size1516);
+    (*(this->success)).resize(_size1516);
+    uint32_t _i1520;
+    for (_i1520 = 0; _i1520 < _size1516; ++_i1520)
     {
-      xfer += (*(this->success))[_i1502].read(iprot);
+      xfer += (*(this->success))[_i1520].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -17045,14 +17928,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->group_names.clear();
-    uint32_t _size1503;
-    ::apache::thrift::protocol::TType _etype1506;
-    xfer += iprot->readListBegin(_etype1506, _size1503);
-    this->group_names.resize(_size1503);
-    uint32_t _i1507;
-    for (_i1507 = 0; _i1507 < _size1503; ++_i1507)
+    uint32_t _size1521;
+    ::apache::thrift::protocol::TType _etype1524;
+    xfer += iprot->readListBegin(_etype1524, _size1521);
+    this->group_names.resize(_size1521);
+    uint32_t _i1525;
+    for (_i1525 = 0; _i1525 < _size1521; ++_i1525)
     {
-      xfer += iprot->readString(this->group_names[_i1507]);
+      xfer += iprot->readString(this->group_names[_i1525]);
    }
     xfer += iprot->readListEnd();
   }
@@ -17097,10 +17980,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-  std::vector<std::string> ::const_iterator _iter1508;
-  for (_iter1508 = this->group_names.begin(); _iter1508 != this->group_names.end(); ++_iter1508)
+  std::vector<std::string> ::const_iterator _iter1526;
+  for (_iter1526 = this->group_names.begin(); _iter1526 != this->group_names.end(); ++_iter1526)
   {
-    xfer += oprot->writeString((*_iter1508));
+    xfer += oprot->writeString((*_iter1526));
   }
   xfer += oprot->writeListEnd();
 }
@@ -17140,10 +18023,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-  std::vector<std::string> ::const_iterator _iter1509;
-  for (_iter1509 = (*(this->group_names)).begin(); _iter1509 != (*(this->group_names)).end(); ++_iter1509)
+  std::vector<std::string> ::const_iterator _iter1527;
+  for (_iter1527 = (*(this->group_names)).begin(); _iter1527 != (*(this->group_names)).end(); ++_iter1527)
   {
-    xfer += oprot->writeString((*_iter1509));
+    xfer += oprot->writeString((*_iter1527));
   }
   xfer += oprot->writeListEnd();
 }
@@ -17184,14 +18067,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1510;
-    ::apache::thrift::protocol::TType _etype1513;
-    xfer += iprot->readListBegin(_etype1513, _size1510);
-    this->success.resize(_size1510);
-    uint32_t _i1514;
-    for (_i1514 = 0; _i1514 < _size1510; ++_i1514)
+    uint32_t _size1528;
+    ::apache::thrift::protocol::TType _etype1531;
+    xfer += iprot->readListBegin(_etype1531, _size1528);
+    this->success.resize(_size1528);
+    uint32_t _i1532;
+    for (_i1532 = 0; _i1532 < _size1528; ++_i1532)
     {
-      xfer += this->success[_i1514].read(iprot);
+      xfer += this->success[_i1532].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -17238,10 +18121,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1515;
-  for (_iter1515 = this->success.begin(); _iter1515 != this->success.end(); ++_iter1515)
+  std::vector<Partition> ::const_iterator _iter1533;
+  for (_iter1533 = this->success.begin(); _iter1533 != this->success.end(); ++_iter1533)
   {
-    xfer += (*_iter1515).write(oprot);
+    xfer += (*_iter1533).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -17290,14 +18173,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1516;
-    ::apache::thrift::protocol::TType _etype1519;
-    xfer += iprot->readListBegin(_etype1519, _size1516);
-    (*(this->success)).resize(_size1516);
-    uint32_t _i1520;
-    for (_i1520 = 0; _i1520 < _size1516; ++_i1520)
+    uint32_t _size1534;
+    ::apache::thrift::protocol::TType _etype1537;
+    xfer += iprot->readListBegin(_etype1537, _size1534);
+    (*(this->success)).resize(_size1534);
+    uint32_t _i1538;
+    for (_i1538 = 0; _i1538 < _size1534; ++_i1538)
     {
-      xfer += (*(this->success))[_i1520].read(iprot);
+      xfer += (*(this->success))[_i1538].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -17475,14 +18358,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1521;
-    ::apache::thrift::protocol::TType _etype1524;
-    xfer += iprot->readListBegin(_etype1524, _size1521);
-    this->success.resize(_size1521);
-    uint32_t _i1525;
-    for (_i1525 = 0; _i1525 < _size1521; ++_i1525)
+    uint32_t _size1539;
+    ::apache::thrift::protocol::TType _etype1542;
+    xfer += iprot->readListBegin(_etype1542, _size1539);
+    this->success.resize(_size1539);
+    uint32_t _i1543;
+    for (_i1543 = 0; _i1543 < _size1539; ++_i1543)
     {
-      xfer += this->success[_i1525].read(iprot);
+      xfer += this->success[_i1543].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -17529,10 +18412,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<PartitionSpec> ::const_iterator _iter1526;
-  for (_iter1526 = this->success.begin(); _iter1526 != this->success.end(); ++_iter1526)
+  std::vector<PartitionSpec> ::const_iterator _iter1544;
+  for (_iter1544 = this->success.begin(); _iter1544 != this->success.end(); ++_iter1544)
   {
-    xfer += (*_iter1526).write(oprot);
+    xfer += (*_iter1544).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -17581,14 +18464,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1527;
-    ::apache::thrift::protocol::TType _etype1530;
-    xfer += iprot->readListBegin(_etype1530, _size1527);
-    (*(this->success)).resize(_size1527);
-    uint32_t _i1531;
-    for (_i1531 = 0; _i1531 < _size1527; ++_i1531)
+    uint32_t _size1545;
+    ::apache::thrift::protocol::TType _etype1548;
+    xfer += iprot->readListBegin(_etype1548, _size1545);
+    (*(this->success)).resize(_size1545);
+    uint32_t _i1549;
+    for (_i1549 = 0; _i1549 < _size1545; ++_i1549)
     {
-      xfer += (*(this->success))[_i1531].read(iprot);
+      xfer += (*(this->success))[_i1549].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -17766,14 +18649,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1532;
-    ::apache::thrift::protocol::TType _etype1535;
-    xfer += iprot->readListBegin(_etype1535, _size1532);
-    this->success.resize(_size1532);
-    uint32_t _i1536;
-    for (_i1536 = 0; _i1536 < _size1532; ++_i1536)
+    uint32_t _size1550;
+    ::apache::thrift::protocol::TType _etype1553;
+    xfer += iprot->readListBegin(_etype1553, _size1550);
+    this->success.resize(_size1550);
+    uint32_t _i1554;
+    for (_i1554 = 0; _i1554 < _size1550; ++_i1554)
     {
-      xfer += iprot->readString(this->success[_i1536]);
+      xfer += iprot->readString(this->success[_i1554]);
    }
     xfer += iprot->readListEnd();
   }
@@ -17820,10 +18703,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter1537;
-  for (_iter1537 = this->success.begin(); _iter1537 != this->success.end(); ++_iter1537)
+  std::vector<std::string> ::const_iterator _iter1555;
+  for (_iter1555 = this->success.begin(); _iter1555 != this->success.end(); ++_iter1555)
   {
-    xfer += oprot->writeString((*_iter1537));
+    xfer += oprot->writeString((*_iter1555));
   }
   xfer += oprot->writeListEnd();
 }
@@ -17872,14 +18755,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift:
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1538;
-    ::apache::thrift::protocol::TType _etype1541;
-    xfer += iprot->readListBegin(_etype1541, _size1538);
-    (*(this->success)).resize(_size1538);
-    uint32_t _i1542;
-    for (_i1542 = 0; _i1542 < _size1538; ++_i1542)
+    uint32_t _size1556;
+    ::apache::thrift::protocol::TType _etype1559;
+    xfer += iprot->readListBegin(_etype1559, _size1556);
+    (*(this->success)).resize(_size1556);
+    uint32_t _i1560;
+    for (_i1560 = 0; _i1560 < _size1556; ++_i1560)
     {
-      xfer += iprot->readString((*(this->success))[_i1542]);
+      xfer += iprot->readString((*(this->success))[_i1560]);
    }
     xfer += iprot->readListEnd();
   }
@@ -18189,14 +19072,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1543;
-    ::apache::thrift::protocol::TType _etype1546;
-    xfer += iprot->readListBegin(_etype1546, _size1543);
-    this->part_vals.resize(_size1543);
-    uint32_t _i1547;
-    for (_i1547 = 0; _i1547 < _size1543; ++_i1547)
+    uint32_t _size1561;
+    ::apache::thrift::protocol::TType _etype1564;
+    xfer += iprot->readListBegin(_etype1564, _size1561);
+    this->part_vals.resize(_size1561);
+    uint32_t _i1565;
+    for (_i1565 = 0; _i1565 < _size1561; ++_i1565)
     {
-      xfer += iprot->readString(this->part_vals[_i1547]);
+      xfer += iprot->readString(this->part_vals[_i1565]);
    }
     xfer += iprot->readListEnd();
   }
@@ -18241,10 +19124,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1548;
-  for (_iter1548 = this->part_vals.begin(); _iter1548 != this->part_vals.end(); ++_iter1548)
+  std::vector<std::string> ::const_iterator _iter1566;
+  for (_iter1566 = this->part_vals.begin(); _iter1566 != this->part_vals.end(); ++_iter1566)
   {
-    xfer += oprot->writeString((*_iter1548));
+    xfer += oprot->writeString((*_iter1566));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18280,10 +19163,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1549;
-  for (_iter1549 = (*(this->part_vals)).begin(); _iter1549 != (*(this->part_vals)).end(); ++_iter1549)
+  std::vector<std::string> ::const_iterator _iter1567;
+  for (_iter1567 = (*(this->part_vals)).begin(); _iter1567 != (*(this->part_vals)).end(); ++_iter1567)
   {
-    xfer += oprot->writeString((*_iter1549));
+    xfer += oprot->writeString((*_iter1567));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18328,14 +19211,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1550;
-    ::apache::thrift::protocol::TType _etype1553;
-    xfer += iprot->readListBegin(_etype1553, _size1550);
-    this->success.resize(_size1550);
-    uint32_t _i1554;
-    for (_i1554 = 0; _i1554 < _size1550; ++_i1554)
+    uint32_t _size1568;
+    ::apache::thrift::protocol::TType _etype1571;
+    xfer += iprot->readListBegin(_etype1571, _size1568);
+    this->success.resize(_size1568);
+    uint32_t _i1572;
+    for (_i1572 = 0; _i1572 < _size1568; ++_i1572)
     {
-      xfer += this->success[_i1554].read(iprot);
+      xfer += this->success[_i1572].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -18382,10 +19265,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1555;
-  for (_iter1555 = this->success.begin(); _iter1555 != this->success.end(); ++_iter1555)
+  std::vector<Partition> ::const_iterator _iter1573;
+  for (_iter1573 = this->success.begin(); _iter1573 != this->success.end(); ++_iter1573)
   {
-    xfer += (*_iter1555).write(oprot);
+    xfer += (*_iter1573).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -18434,14 +19317,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1556;
-    ::apache::thrift::protocol::TType _etype1559;
-    xfer += iprot->readListBegin(_etype1559, _size1556);
-    (*(this->success)).resize(_size1556);
-    uint32_t _i1560;
-    for (_i1560 = 0; _i1560 < _size1556; ++_i1560)
+    uint32_t _size1574;
+    ::apache::thrift::protocol::TType _etype1577;
+    xfer += iprot->readListBegin(_etype1577, _size1574);
+    (*(this->success)).resize(_size1574);
+    uint32_t _i1578;
+    for (_i1578 = 0; _i1578 < _size1574; ++_i1578)
     {
-      xfer += (*(this->success))[_i1560].read(iprot);
+      xfer += (*(this->success))[_i1578].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -18524,14 +19407,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1561;
-    ::apache::thrift::protocol::TType _etype1564;
-    xfer += iprot->readListBegin(_etype1564, _size1561);
-    this->part_vals.resize(_size1561);
-    uint32_t _i1565;
-    for (_i1565 = 0; _i1565 < _size1561; ++_i1565)
+    uint32_t _size1579;
+    ::apache::thrift::protocol::TType _etype1582;
+    xfer += iprot->readListBegin(_etype1582, _size1579);
+    this->part_vals.resize(_size1579);
+    uint32_t _i1583;
+    for (_i1583 = 0; _i1583 < _size1579; ++_i1583)
     {
-      xfer += iprot->readString(this->part_vals[_i1565]);
+      xfer += iprot->readString(this->part_vals[_i1583]);
    }
     xfer += iprot->readListEnd();
   }
@@ -18560,14 +19443,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->group_names.clear();
-    uint32_t _size1566;
-    ::apache::thrift::protocol::TType _etype1569;
-    xfer += iprot->readListBegin(_etype1569, _size1566);
-    this->group_names.resize(_size1566);
-    uint32_t _i1570;
-    for (_i1570 = 0; _i1570 < _size1566; ++_i1570)
+    uint32_t _size1584;
+    ::apache::thrift::protocol::TType _etype1587;
+    xfer += iprot->readListBegin(_etype1587, _size1584);
+    this->group_names.resize(_size1584);
+    uint32_t _i1588;
+    for (_i1588 = 0; _i1588 < _size1584; ++_i1588)
     {
-      xfer += iprot->readString(this->group_names[_i1570]);
+      xfer += iprot->readString(this->group_names[_i1588]);
    }
     xfer += iprot->readListEnd();
   }
@@ -18604,10 +19487,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1571;
-  for (_iter1571 = this->part_vals.begin(); _iter1571 != this->part_vals.end(); ++_iter1571)
+  std::vector<std::string> ::const_iterator _iter1589;
+  for (_iter1589 = this->part_vals.begin(); _iter1589 != this->part_vals.end(); ++_iter1589)
   {
-    xfer += oprot->writeString((*_iter1571));
+    xfer += oprot->writeString((*_iter1589));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18624,10 +19507,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-  std::vector<std::string> ::const_iterator _iter1572;
-  for (_iter1572 = this->group_names.begin(); _iter1572 != this->group_names.end(); ++_iter1572)
+  std::vector<std::string> ::const_iterator _iter1590;
+  for (_iter1590 = this->group_names.begin(); _iter1590 != this->group_names.end(); ++_iter1590)
   {
-    xfer += oprot->writeString((*_iter1572));
+    xfer += oprot->writeString((*_iter1590));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18659,10 +19542,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1573;
-  for (_iter1573 = (*(this->part_vals)).begin(); _iter1573 != (*(this->part_vals)).end(); ++_iter1573)
+  std::vector<std::string> ::const_iterator _iter1591;
+  for (_iter1591 = (*(this->part_vals)).begin(); _iter1591 != (*(this->part_vals)).end(); ++_iter1591)
   {
-    xfer += oprot->writeString((*_iter1573));
+    xfer += oprot->writeString((*_iter1591));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18679,10 +19562,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
 xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-  std::vector<std::string> ::const_iterator _iter1574;
-  for (_iter1574 = (*(this->group_names)).begin(); _iter1574 != (*(this->group_names)).end(); ++_iter1574)
+  std::vector<std::string> ::const_iterator _iter1592;
+  for (_iter1592 = (*(this->group_names)).begin(); _iter1592 != (*(this->group_names)).end(); ++_iter1592)
   {
-    xfer += oprot->writeString((*_iter1574));
+    xfer += oprot->writeString((*_iter1592));
   }
   xfer += oprot->writeListEnd();
 }
@@ -18723,14 +19606,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1575;
-    ::apache::thrift::protocol::TType _etype1578;
-    xfer += iprot->readListBegin(_etype1578, _size1575);
-    this->success.resize(_size1575);
-    uint32_t _i1579;
-    for (_i1579 = 0; _i1579 < _size1575; ++_i1579)
+    uint32_t _size1593;
+    ::apache::thrift::protocol::TType _etype1596;
+    xfer += iprot->readListBegin(_etype1596, _size1593);
+    this->success.resize(_size1593);
+    uint32_t _i1597;
+    for (_i1597 = 0; _i1597 < _size1593; ++_i1597)
     {
-      xfer += this->success[_i1579].read(iprot);
+      xfer += this->success[_i1597].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -18777,10 +19660,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache:
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1580;
-  for (_iter1580 = this->success.begin(); _iter1580 != this->success.end(); ++_iter1580)
+  std::vector<Partition> ::const_iterator _iter1598;
+  for (_iter1598 = this->success.begin(); _iter1598 != this->success.end(); ++_iter1598)
   {
-    xfer += (*_iter1580).write(oprot);
+    xfer += (*_iter1598).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -18829,14 +19712,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache:
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1581;
-    ::apache::thrift::protocol::TType _etype1584;
-    xfer += iprot->readListBegin(_etype1584, _size1581);
-    (*(this->success)).resize(_size1581);
-    uint32_t _i1585;
-    for (_i1585 = 0; _i1585 < _size1581; ++_i1585)
+    uint32_t _size1599;
+    ::apache::thrift::protocol::TType _etype1602;
+    xfer += iprot->readListBegin(_etype1602, _size1599);
+    (*(this->success)).resize(_size1599);
+    uint32_t _i1603;
+    for (_i1603 = 0; _i1603 < _size1599; ++_i1603)
     {
-      xfer += (*(this->success))[_i1585].read(iprot);
+      xfer += (*(this->success))[_i1603].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -18919,14 +19802,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift:
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1586;
-    ::apache::thrift::protocol::TType _etype1589;
-    xfer += iprot->readListBegin(_etype1589, _size1586);
-    this->part_vals.resize(_size1586);
-    uint32_t _i1590;
-    for (_i1590 = 0; _i1590 < _size1586; ++_i1590)
+    uint32_t _size1604;
+    ::apache::thrift::protocol::TType _etype1607;
+    xfer += iprot->readListBegin(_etype1607, _size1604);
+    this->part_vals.resize(_size1604);
+    uint32_t _i1608;
+    for (_i1608 = 0; _i1608 < _size1604; ++_i1608)
     {
-      xfer += iprot->readString(this->part_vals[_i1590]);
+      xfer += iprot->readString(this->part_vals[_i1608]);
    }
     xfer += iprot->readListEnd();
   }
@@ -18971,10 +19854,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1591;
-  for (_iter1591 = this->part_vals.begin(); _iter1591 != this->part_vals.end(); ++_iter1591)
+  std::vector<std::string> ::const_iterator _iter1609;
+  for (_iter1609 = this->part_vals.begin(); _iter1609 != this->part_vals.end(); ++_iter1609)
   {
-    xfer += oprot->writeString((*_iter1591));
+    xfer += oprot->writeString((*_iter1609));
   }
   xfer += oprot->writeListEnd();
 }
@@ -19010,10 +19893,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1592;
-  for (_iter1592 = (*(this->part_vals)).begin(); _iter1592 != (*(this->part_vals)).end(); ++_iter1592)
+  std::vector<std::string> ::const_iterator _iter1610;
+  for (_iter1610 = (*(this->part_vals)).begin(); _iter1610 != (*(this->part_vals)).end(); ++_iter1610)
   {
-    xfer += oprot->writeString((*_iter1592));
+    xfer += oprot->writeString((*_iter1610));
   }
   xfer += oprot->writeListEnd();
 }
@@ -19058,14 +19941,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1593;
-    ::apache::thrift::protocol::TType _etype1596;
-    xfer += iprot->readListBegin(_etype1596, _size1593);
-    this->success.resize(_size1593);
-    uint32_t _i1597;
-    for (_i1597 = 0; _i1597 < _size1593; ++_i1597)
+    uint32_t _size1611;
+    ::apache::thrift::protocol::TType _etype1614;
+    xfer += iprot->readListBegin(_etype1614, _size1611);
+    this->success.resize(_size1611);
+    uint32_t _i1615;
+    for (_i1615 = 0; _i1615 < _size1611; ++_i1615)
     {
-      xfer += iprot->readString(this->success[_i1597]);
+      xfer += iprot->readString(this->success[_i1615]);
    }
     xfer += iprot->readListEnd();
   }
@@ -19112,10 +19995,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter1598;
-  for (_iter1598 = this->success.begin(); _iter1598 != this->success.end(); ++_iter1598)
+  std::vector<std::string> ::const_iterator _iter1616;
+  for (_iter1616 = this->success.begin(); _iter1616 != this->success.end(); ++_iter1616)
   {
-    xfer += oprot->writeString((*_iter1598));
+    xfer += oprot->writeString((*_iter1616));
   }
   xfer += oprot->writeListEnd();
 }
@@ -19164,14 +20047,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1599;
-    ::apache::thrift::protocol::TType _etype1602;
-    xfer += iprot->readListBegin(_etype1602, _size1599);
-    (*(this->success)).resize(_size1599);
-    uint32_t _i1603;
-    for (_i1603 = 0; _i1603 < _size1599; ++_i1603)
+    uint32_t _size1617;
+    ::apache::thrift::protocol::TType _etype1620;
+    xfer += iprot->readListBegin(_etype1620, _size1617);
+    (*(this->success)).resize(_size1617);
+    uint32_t _i1621;
+    for (_i1621 = 0; _i1621 < _size1617; ++_i1621)
     {
-      xfer += iprot->readString((*(this->success))[_i1603]);
+      xfer += iprot->readString((*(this->success))[_i1621]);
    }
     xfer += iprot->readListEnd();
   }
@@ -19365,14 +20248,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1604;
-    ::apache::thrift::protocol::TType _etype1607;
-    xfer += iprot->readListBegin(_etype1607, _size1604);
-    this->success.resize(_size1604);
-    uint32_t _i1608;
-    for (_i1608 = 0; _i1608 < _size1604; ++_i1608)
+    uint32_t _size1622;
+    ::apache::thrift::protocol::TType _etype1625;
+    xfer += iprot->readListBegin(_etype1625, _size1622);
+    this->success.resize(_size1622);
+    uint32_t _i1626;
+    for (_i1626 = 0; _i1626 < _size1622; ++_i1626)
     {
-      xfer += this->success[_i1608].read(iprot);
+      xfer += this->success[_i1626].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -19419,10 +20302,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1609;
-  for (_iter1609 = this->success.begin(); _iter1609 != this->success.end(); ++_iter1609)
+  std::vector<Partition> ::const_iterator _iter1627;
+  for (_iter1627 = this->success.begin(); _iter1627 != this->success.end(); ++_iter1627)
   {
-    xfer += (*_iter1609).write(oprot);
+    xfer += (*_iter1627).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -19471,14 +20354,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1610;
-    ::apache::thrift::protocol::TType _etype1613;
-    xfer += iprot->readListBegin(_etype1613, _size1610);
-    (*(this->success)).resize(_size1610);
-    uint32_t _i1614;
-    for (_i1614 = 0; _i1614 < _size1610; ++_i1614)
+    uint32_t _size1628;
+    ::apache::thrift::protocol::TType _etype1631;
+    xfer += iprot->readListBegin(_etype1631, _size1628);
+    (*(this->success)).resize(_size1628);
+    uint32_t _i1632;
+    for (_i1632 = 0; _i1632 < _size1628; ++_i1632)
     {
-      xfer += (*(this->success))[_i1614].read(iprot);
+      xfer += (*(this->success))[_i1632].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -19672,14 +20555,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1615;
-    ::apache::thrift::protocol::TType _etype1618;
-    xfer += iprot->readListBegin(_etype1618, _size1615);
-    this->success.resize(_size1615);
-    uint32_t _i1619;
-    for (_i1619 = 0; _i1619 < _size1615; ++_i1619)
+    uint32_t _size1633;
+    ::apache::thrift::protocol::TType _etype1636;
+    xfer += iprot->readListBegin(_etype1636, _size1633);
+    this->success.resize(_size1633);
+    uint32_t _i1637;
+    for (_i1637 = 0; _i1637 < _size1633; ++_i1637)
     {
-      xfer += this->success[_i1619].read(iprot);
+      xfer += this->success[_i1637].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -19726,10 +20609,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<PartitionSpec> ::const_iterator _iter1620;
-  for (_iter1620 = this->success.begin(); _iter1620 != this->success.end(); ++_iter1620)
+  std::vector<PartitionSpec> ::const_iterator _iter1638;
+  for (_iter1638 = this->success.begin(); _iter1638 != this->success.end(); ++_iter1638)
   {
-    xfer += (*_iter1620).write(oprot);
+    xfer += (*_iter1638).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -19778,14 +20661,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1621;
-    ::apache::thrift::protocol::TType _etype1624;
-    xfer += iprot->readListBegin(_etype1624, _size1621);
-    (*(this->success)).resize(_size1621);
-    uint32_t _i1625;
-    for (_i1625 = 0; _i1625 < _size1621; ++_i1625)
+    uint32_t _size1639;
+    ::apache::thrift::protocol::TType _etype1642;
+    xfer += iprot->readListBegin(_etype1642, _size1639);
+    (*(this->success)).resize(_size1639);
+    uint32_t _i1643;
+    for (_i1643 = 0; _i1643 < _size1639; ++_i1643)
    {
-      xfer += (*(this->success))[_i1625].read(iprot);
+      xfer += (*(this->success))[_i1643].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -20354,14 +21237,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->names.clear();
-    uint32_t _size1626;
-    ::apache::thrift::protocol::TType _etype1629;
-    xfer += iprot->readListBegin(_etype1629, _size1626);
-    this->names.resize(_size1626);
-    uint32_t _i1630;
-    for (_i1630 = 0; _i1630 < _size1626; ++_i1630)
+    uint32_t _size1644;
+    ::apache::thrift::protocol::TType _etype1647;
+    xfer += iprot->readListBegin(_etype1647, _size1644);
+    this->names.resize(_size1644);
+    uint32_t _i1648;
+    for (_i1648 = 0; _i1648 < _size1644; ++_i1648)
     {
-      xfer += iprot->readString(this->names[_i1630]);
+      xfer += iprot->readString(this->names[_i1648]);
    }
     xfer += iprot->readListEnd();
   }
@@ -20398,10 +21281,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif
 xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size()));
-  std::vector<std::string> ::const_iterator _iter1631;
-  for (_iter1631 = this->names.begin(); _iter1631 != this->names.end(); ++_iter1631)
+  std::vector<std::string> ::const_iterator _iter1649;
+  for (_iter1649 = this->names.begin(); _iter1649 != this->names.end(); ++_iter1649)
   {
-    xfer += oprot->writeString((*_iter1631));
+    xfer += oprot->writeString((*_iter1649));
   }
   xfer += oprot->writeListEnd();
 }
@@ -20433,10 +21316,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri
 xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->names)).size()));
-  std::vector<std::string> ::const_iterator _iter1632;
-  for (_iter1632 = (*(this->names)).begin(); _iter1632 != (*(this->names)).end(); ++_iter1632)
+  std::vector<std::string> ::const_iterator _iter1650;
+  for (_iter1650 = (*(this->names)).begin(); _iter1650 != (*(this->names)).end(); ++_iter1650)
   {
-    xfer += oprot->writeString((*_iter1632));
+    xfer += oprot->writeString((*_iter1650));
   }
   xfer += oprot->writeListEnd();
 }
@@ -20477,14 +21360,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1633;
-    ::apache::thrift::protocol::TType _etype1636;
-    xfer += iprot->readListBegin(_etype1636, _size1633);
-    this->success.resize(_size1633);
-    uint32_t _i1637;
-    for (_i1637 = 0; _i1637 < _size1633; ++_i1637)
+    uint32_t _size1651;
+    ::apache::thrift::protocol::TType _etype1654;
+    xfer += iprot->readListBegin(_etype1654, _size1651);
+    this->success.resize(_size1651);
+    uint32_t _i1655;
+    for (_i1655 = 0; _i1655 < _size1651; ++_i1655)
     {
-      xfer += this->success[_i1637].read(iprot);
+      xfer += this->success[_i1655].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -20531,10 +21414,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-  std::vector<Partition> ::const_iterator _iter1638;
-  for (_iter1638 = this->success.begin(); _iter1638 != this->success.end(); ++_iter1638)
+  std::vector<Partition> ::const_iterator _iter1656;
+  for (_iter1656 = this->success.begin(); _iter1656 != this->success.end(); ++_iter1656)
   {
-    xfer += (*_iter1638).write(oprot);
+    xfer += (*_iter1656).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -20583,14 +21466,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     (*(this->success)).clear();
-    uint32_t _size1639;
-    ::apache::thrift::protocol::TType _etype1642;
-    xfer += iprot->readListBegin(_etype1642, _size1639);
-    (*(this->success)).resize(_size1639);
-    uint32_t _i1643;
-    for (_i1643 = 0; _i1643 < _size1639; ++_i1643)
+    uint32_t _size1657;
+    ::apache::thrift::protocol::TType _etype1660;
+    xfer += iprot->readListBegin(_etype1660, _size1657);
+    (*(this->success)).resize(_size1657);
+    uint32_t _i1661;
+    for (_i1661 = 0; _i1661 < _size1657; ++_i1661)
     {
-      xfer += (*(this->success))[_i1643].read(iprot);
+      xfer += (*(this->success))[_i1661].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -20912,14 +21795,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->new_parts.clear();
-    uint32_t _size1644;
-    ::apache::thrift::protocol::TType _etype1647;
-    xfer += iprot->readListBegin(_etype1647, _size1644);
-    this->new_parts.resize(_size1644);
-    uint32_t _i1648;
-    for (_i1648 = 0; _i1648 < _size1644; ++_i1648)
+    uint32_t _size1662;
+    ::apache::thrift::protocol::TType _etype1665;
+    xfer += iprot->readListBegin(_etype1665, _size1662);
+    this->new_parts.resize(_size1662);
+    uint32_t _i1666;
+    for (_i1666 = 0; _i1666 < _size1662; ++_i1666)
     {
-      xfer += this->new_parts[_i1648].read(iprot);
+      xfer += this->new_parts[_i1666].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -20956,10 +21839,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot
 xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-  std::vector<Partition> ::const_iterator _iter1649;
-  for (_iter1649 = this->new_parts.begin(); _iter1649 != this->new_parts.end(); ++_iter1649)
+  std::vector<Partition> ::const_iterator _iter1667;
+  for (_iter1667 = this->new_parts.begin(); _iter1667 != this->new_parts.end(); ++_iter1667)
   {
-    xfer += (*_iter1649).write(oprot);
+    xfer += (*_iter1667).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -20991,10 +21874,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro
 xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-  std::vector<Partition> ::const_iterator _iter1650;
-  for (_iter1650 = (*(this->new_parts)).begin(); _iter1650 != (*(this->new_parts)).end(); ++_iter1650)
+  std::vector<Partition> ::const_iterator _iter1668;
+  for (_iter1668 = (*(this->new_parts)).begin(); _iter1668 != (*(this->new_parts)).end(); ++_iter1668)
   {
-    xfer += (*_iter1650).write(oprot);
+    xfer += (*_iter1668).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -21179,14 +22062,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->new_parts.clear();
-    uint32_t _size1651;
-    ::apache::thrift::protocol::TType _etype1654;
-    xfer += iprot->readListBegin(_etype1654, _size1651);
-    this->new_parts.resize(_size1651);
-    uint32_t _i1655;
-    for (_i1655 = 0; _i1655 < _size1651; ++_i1655)
+    uint32_t _size1669;
+    ::apache::thrift::protocol::TType _etype1672;
+    xfer += iprot->readListBegin(_etype1672, _size1669);
+    this->new_parts.resize(_size1669);
+    uint32_t _i1673;
+    for (_i1673 = 0; _i1673 < _size1669; ++_i1673)
     {
-      xfer += this->new_parts[_i1655].read(iprot);
+      xfer += this->new_parts[_i1673].read(iprot);
    }
     xfer += iprot->readListEnd();
   }
@@ -21231,10 +22114,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri
 xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-  std::vector<Partition> ::const_iterator _iter1656;
-  for (_iter1656 = this->new_parts.begin(); _iter1656 != this->new_parts.end(); ++_iter1656)
+  std::vector<Partition> ::const_iterator _iter1674;
+  for (_iter1674 = this->new_parts.begin(); _iter1674 != this->new_parts.end(); ++_iter1674)
   {
-    xfer += (*_iter1656).write(oprot);
+    xfer += (*_iter1674).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -21270,10 +22153,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr
 xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-  std::vector<Partition> ::const_iterator _iter1657;
-  for (_iter1657 = (*(this->new_parts)).begin(); _iter1657 != (*(this->new_parts)).end(); ++_iter1657)
+  std::vector<Partition> ::const_iterator _iter1675;
+  for (_iter1675 = (*(this->new_parts)).begin(); _iter1675 != (*(this->new_parts)).end(); ++_iter1675)
   {
-    xfer += (*_iter1657).write(oprot);
+    xfer += (*_iter1675).write(oprot);
   }
   xfer += oprot->writeListEnd();
 }
@@ -21717,14 +22600,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1658;
-    ::apache::thrift::protocol::TType _etype1661;
-    xfer += iprot->readListBegin(_etype1661, _size1658);
-    this->part_vals.resize(_size1658);
-    uint32_t _i1662;
-    for (_i1662 = 0; _i1662 < _size1658; ++_i1662)
+    uint32_t _size1676;
+    ::apache::thrift::protocol::TType _etype1679;
+    xfer += iprot->readListBegin(_etype1679, _size1676);
+    this->part_vals.resize(_size1676);
+    uint32_t _i1680;
+    for (_i1680 = 0; _i1680 < _size1676; ++_i1680)
     {
-      xfer += iprot->readString(this->part_vals[_i1662]);
+      xfer += iprot->readString(this->part_vals[_i1680]);
    }
     xfer += iprot->readListEnd();
   }
@@ -21769,10 +22652,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1663;
-  for (_iter1663 = this->part_vals.begin(); _iter1663 != this->part_vals.end(); ++_iter1663)
+  std::vector<std::string> ::const_iterator _iter1681;
+  for (_iter1681 = this->part_vals.begin(); _iter1681 != this->part_vals.end(); ++_iter1681)
   {
-    xfer += oprot->writeString((*_iter1663));
+    xfer += oprot->writeString((*_iter1681));
   }
   xfer += oprot->writeListEnd();
 }
@@ -21808,10 +22691,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1664;
-  for (_iter1664 = (*(this->part_vals)).begin(); _iter1664 != (*(this->part_vals)).end(); ++_iter1664)
+  std::vector<std::string> ::const_iterator _iter1682;
+  for (_iter1682 = (*(this->part_vals)).begin(); _iter1682 != (*(this->part_vals)).end(); ++_iter1682)
   {
-    xfer += oprot->writeString((*_iter1664));
+    xfer += oprot->writeString((*_iter1682));
  }
   xfer += oprot->writeListEnd();
 }
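Further down, the markPartitionForEvent hunks also renumber the ecastNNNN temporary used for enum fields: Thrift transmits enums as i32 and the generated code casts back on receipt. An illustrative sketch of that shape (EventKind is a stand-in type, not a metastore class):

#include <cstdint>
#include <thrift/protocol/TProtocol.h>

using apache::thrift::protocol::TProtocol;

enum EventKind { LOAD_DONE = 1 };   // stand-in for PartitionEventType::type

uint32_t readEventKind(TProtocol* iprot, EventKind& out) {
  uint32_t xfer = 0;
  int32_t ecast;                    // enums travel as plain i32 on the wire
  xfer += iprot->readI32(ecast);
  out = static_cast<EventKind>(ecast);
  return xfer;
}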
@@ -21984,14 +22867,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->part_vals.clear();
-    uint32_t _size1665;
-    ::apache::thrift::protocol::TType _etype1668;
-    xfer += iprot->readListBegin(_etype1668, _size1665);
-    this->part_vals.resize(_size1665);
-    uint32_t _i1669;
-    for (_i1669 = 0; _i1669 < _size1665; ++_i1669)
+    uint32_t _size1683;
+    ::apache::thrift::protocol::TType _etype1686;
+    xfer += iprot->readListBegin(_etype1686, _size1683);
+    this->part_vals.resize(_size1683);
+    uint32_t _i1687;
+    for (_i1687 = 0; _i1687 < _size1683; ++_i1687)
     {
-      xfer += iprot->readString(this->part_vals[_i1669]);
+      xfer += iprot->readString(this->part_vals[_i1687]);
    }
     xfer += iprot->readListEnd();
   }
@@ -22028,10 +22911,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-  std::vector<std::string> ::const_iterator _iter1670;
-  for (_iter1670 = this->part_vals.begin(); _iter1670 != this->part_vals.end(); ++_iter1670)
+  std::vector<std::string> ::const_iterator _iter1688;
+  for (_iter1688 = this->part_vals.begin(); _iter1688 != this->part_vals.end(); ++_iter1688)
   {
-    xfer += oprot->writeString((*_iter1670));
+    xfer += oprot->writeString((*_iter1688));
   }
   xfer += oprot->writeListEnd();
 }
@@ -22059,10 +22942,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(::
 xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-  std::vector<std::string> ::const_iterator _iter1671;
-  for (_iter1671 = (*(this->part_vals)).begin(); _iter1671 != (*(this->part_vals)).end(); ++_iter1671)
+  std::vector<std::string> ::const_iterator _iter1689;
+  for (_iter1689 = (*(this->part_vals)).begin(); _iter1689 != (*(this->part_vals)).end(); ++_iter1689)
   {
-    xfer += oprot->writeString((*_iter1671));
+    xfer += oprot->writeString((*_iter1689));
   }
   xfer += oprot->writeListEnd();
 }
@@ -22537,14 +23420,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
     this->success.clear();
-    uint32_t _size1672;
-    ::apache::thrift::protocol::TType _etype1675;
-    xfer += iprot->readListBegin(_etype1675, _size1672);
-    this->success.resize(_size1672);
-    uint32_t _i1676;
-    for (_i1676 = 0; _i1676 < _size1672; ++_i1676)
+    uint32_t _size1690;
+    ::apache::thrift::protocol::TType _etype1693;
+    xfer += iprot->readListBegin(_etype1693, _size1690);
+    this->success.resize(_size1690);
+    uint32_t _i1694;
+    for (_i1694 = 0; _i1694 < _size1690; ++_i1694)
     {
-      xfer += iprot->readString(this->success[_i1676]);
+      xfer += iprot->readString(this->success[_i1694]);
    }
     xfer += iprot->readListEnd();
   }
@@ -22583,10 +23466,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri
 xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter1677;
-  for (_iter1677 = this->success.begin(); _iter1677 != this->success.end(); ++_iter1677)
+  std::vector<std::string> ::const_iterator _iter1695;
+  for (_iter1695 = this->success.begin(); _iter1695 != this->success.end(); ++_iter1695)
   {
-    xfer += oprot->writeString((*_iter1677));
+    xfer += oprot->writeString((*_iter1695));
   }
   xfer += oprot->writeListEnd();
 }
oprot->writeString((*_iter1677)); + xfer += oprot->writeString((*_iter1695)); } xfer += oprot->writeListEnd(); } @@ -22631,14 +23514,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1678; - ::apache::thrift::protocol::TType _etype1681; - xfer += iprot->readListBegin(_etype1681, _size1678); - (*(this->success)).resize(_size1678); - uint32_t _i1682; - for (_i1682 = 0; _i1682 < _size1678; ++_i1682) + uint32_t _size1696; + ::apache::thrift::protocol::TType _etype1699; + xfer += iprot->readListBegin(_etype1699, _size1696); + (*(this->success)).resize(_size1696); + uint32_t _i1700; + for (_i1700 = 0; _i1700 < _size1696; ++_i1700) { - xfer += iprot->readString((*(this->success))[_i1682]); + xfer += iprot->readString((*(this->success))[_i1700]); } xfer += iprot->readListEnd(); } @@ -22776,17 +23659,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1683; - ::apache::thrift::protocol::TType _ktype1684; - ::apache::thrift::protocol::TType _vtype1685; - xfer += iprot->readMapBegin(_ktype1684, _vtype1685, _size1683); - uint32_t _i1687; - for (_i1687 = 0; _i1687 < _size1683; ++_i1687) + uint32_t _size1701; + ::apache::thrift::protocol::TType _ktype1702; + ::apache::thrift::protocol::TType _vtype1703; + xfer += iprot->readMapBegin(_ktype1702, _vtype1703, _size1701); + uint32_t _i1705; + for (_i1705 = 0; _i1705 < _size1701; ++_i1705) { - std::string _key1688; - xfer += iprot->readString(_key1688); - std::string& _val1689 = this->success[_key1688]; - xfer += iprot->readString(_val1689); + std::string _key1706; + xfer += iprot->readString(_key1706); + std::string& _val1707 = this->success[_key1706]; + xfer += iprot->readString(_val1707); } xfer += iprot->readMapEnd(); } @@ -22825,11 +23708,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1690; - for (_iter1690 = this->success.begin(); _iter1690 != this->success.end(); ++_iter1690) + std::map ::const_iterator _iter1708; + for (_iter1708 = this->success.begin(); _iter1708 != this->success.end(); ++_iter1708) { - xfer += oprot->writeString(_iter1690->first); - xfer += oprot->writeString(_iter1690->second); + xfer += oprot->writeString(_iter1708->first); + xfer += oprot->writeString(_iter1708->second); } xfer += oprot->writeMapEnd(); } @@ -22874,17 +23757,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1691; - ::apache::thrift::protocol::TType _ktype1692; - ::apache::thrift::protocol::TType _vtype1693; - xfer += iprot->readMapBegin(_ktype1692, _vtype1693, _size1691); - uint32_t _i1695; - for (_i1695 = 0; _i1695 < _size1691; ++_i1695) + uint32_t _size1709; + ::apache::thrift::protocol::TType _ktype1710; + ::apache::thrift::protocol::TType _vtype1711; + xfer += iprot->readMapBegin(_ktype1710, _vtype1711, _size1709); + uint32_t _i1713; + for (_i1713 = 0; _i1713 < _size1709; ++_i1713) { - std::string _key1696; - xfer += iprot->readString(_key1696); - std::string& _val1697 = 
(*(this->success))[_key1696]; - xfer += iprot->readString(_val1697); + std::string _key1714; + xfer += iprot->readString(_key1714); + std::string& _val1715 = (*(this->success))[_key1714]; + xfer += iprot->readString(_val1715); } xfer += iprot->readMapEnd(); } @@ -22959,17 +23842,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1698; - ::apache::thrift::protocol::TType _ktype1699; - ::apache::thrift::protocol::TType _vtype1700; - xfer += iprot->readMapBegin(_ktype1699, _vtype1700, _size1698); - uint32_t _i1702; - for (_i1702 = 0; _i1702 < _size1698; ++_i1702) + uint32_t _size1716; + ::apache::thrift::protocol::TType _ktype1717; + ::apache::thrift::protocol::TType _vtype1718; + xfer += iprot->readMapBegin(_ktype1717, _vtype1718, _size1716); + uint32_t _i1720; + for (_i1720 = 0; _i1720 < _size1716; ++_i1720) { - std::string _key1703; - xfer += iprot->readString(_key1703); - std::string& _val1704 = this->part_vals[_key1703]; - xfer += iprot->readString(_val1704); + std::string _key1721; + xfer += iprot->readString(_key1721); + std::string& _val1722 = this->part_vals[_key1721]; + xfer += iprot->readString(_val1722); } xfer += iprot->readMapEnd(); } @@ -22980,9 +23863,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1705; - xfer += iprot->readI32(ecast1705); - this->eventType = (PartitionEventType::type)ecast1705; + int32_t ecast1723; + xfer += iprot->readI32(ecast1723); + this->eventType = (PartitionEventType::type)ecast1723; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -23016,11 +23899,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1706; - for (_iter1706 = this->part_vals.begin(); _iter1706 != this->part_vals.end(); ++_iter1706) + std::map ::const_iterator _iter1724; + for (_iter1724 = this->part_vals.begin(); _iter1724 != this->part_vals.end(); ++_iter1724) { - xfer += oprot->writeString(_iter1706->first); - xfer += oprot->writeString(_iter1706->second); + xfer += oprot->writeString(_iter1724->first); + xfer += oprot->writeString(_iter1724->second); } xfer += oprot->writeMapEnd(); } @@ -23056,11 +23939,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1707; - for (_iter1707 = (*(this->part_vals)).begin(); _iter1707 != (*(this->part_vals)).end(); ++_iter1707) + std::map ::const_iterator _iter1725; + for (_iter1725 = (*(this->part_vals)).begin(); _iter1725 != (*(this->part_vals)).end(); ++_iter1725) { - xfer += oprot->writeString(_iter1707->first); - xfer += oprot->writeString(_iter1707->second); + xfer += oprot->writeString(_iter1725->first); + xfer += oprot->writeString(_iter1725->second); } xfer += oprot->writeMapEnd(); } @@ -23329,17 +24212,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if 
(ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1708; - ::apache::thrift::protocol::TType _ktype1709; - ::apache::thrift::protocol::TType _vtype1710; - xfer += iprot->readMapBegin(_ktype1709, _vtype1710, _size1708); - uint32_t _i1712; - for (_i1712 = 0; _i1712 < _size1708; ++_i1712) + uint32_t _size1726; + ::apache::thrift::protocol::TType _ktype1727; + ::apache::thrift::protocol::TType _vtype1728; + xfer += iprot->readMapBegin(_ktype1727, _vtype1728, _size1726); + uint32_t _i1730; + for (_i1730 = 0; _i1730 < _size1726; ++_i1730) { - std::string _key1713; - xfer += iprot->readString(_key1713); - std::string& _val1714 = this->part_vals[_key1713]; - xfer += iprot->readString(_val1714); + std::string _key1731; + xfer += iprot->readString(_key1731); + std::string& _val1732 = this->part_vals[_key1731]; + xfer += iprot->readString(_val1732); } xfer += iprot->readMapEnd(); } @@ -23350,9 +24233,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1715; - xfer += iprot->readI32(ecast1715); - this->eventType = (PartitionEventType::type)ecast1715; + int32_t ecast1733; + xfer += iprot->readI32(ecast1733); + this->eventType = (PartitionEventType::type)ecast1733; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -23386,11 +24269,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1716; - for (_iter1716 = this->part_vals.begin(); _iter1716 != this->part_vals.end(); ++_iter1716) + std::map ::const_iterator _iter1734; + for (_iter1734 = this->part_vals.begin(); _iter1734 != this->part_vals.end(); ++_iter1734) { - xfer += oprot->writeString(_iter1716->first); - xfer += oprot->writeString(_iter1716->second); + xfer += oprot->writeString(_iter1734->first); + xfer += oprot->writeString(_iter1734->second); } xfer += oprot->writeMapEnd(); } @@ -23426,11 +24309,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1717; - for (_iter1717 = (*(this->part_vals)).begin(); _iter1717 != (*(this->part_vals)).end(); ++_iter1717) + std::map ::const_iterator _iter1735; + for (_iter1735 = (*(this->part_vals)).begin(); _iter1735 != (*(this->part_vals)).end(); ++_iter1735) { - xfer += oprot->writeString(_iter1717->first); - xfer += oprot->writeString(_iter1717->second); + xfer += oprot->writeString(_iter1735->first); + xfer += oprot->writeString(_iter1735->second); } xfer += oprot->writeMapEnd(); } @@ -28579,14 +29462,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1718; - ::apache::thrift::protocol::TType _etype1721; - xfer += iprot->readListBegin(_etype1721, _size1718); - this->success.resize(_size1718); - uint32_t _i1722; - for (_i1722 = 0; _i1722 < _size1718; ++_i1722) + uint32_t _size1736; + ::apache::thrift::protocol::TType 
_etype1739; + xfer += iprot->readListBegin(_etype1739, _size1736); + this->success.resize(_size1736); + uint32_t _i1740; + for (_i1740 = 0; _i1740 < _size1736; ++_i1740) { - xfer += iprot->readString(this->success[_i1722]); + xfer += iprot->readString(this->success[_i1740]); } xfer += iprot->readListEnd(); } @@ -28625,10 +29508,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1723; - for (_iter1723 = this->success.begin(); _iter1723 != this->success.end(); ++_iter1723) + std::vector ::const_iterator _iter1741; + for (_iter1741 = this->success.begin(); _iter1741 != this->success.end(); ++_iter1741) { - xfer += oprot->writeString((*_iter1723)); + xfer += oprot->writeString((*_iter1741)); } xfer += oprot->writeListEnd(); } @@ -28673,14 +29556,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1724; - ::apache::thrift::protocol::TType _etype1727; - xfer += iprot->readListBegin(_etype1727, _size1724); - (*(this->success)).resize(_size1724); - uint32_t _i1728; - for (_i1728 = 0; _i1728 < _size1724; ++_i1728) + uint32_t _size1742; + ::apache::thrift::protocol::TType _etype1745; + xfer += iprot->readListBegin(_etype1745, _size1742); + (*(this->success)).resize(_size1742); + uint32_t _i1746; + for (_i1746 = 0; _i1746 < _size1742; ++_i1746) { - xfer += iprot->readString((*(this->success))[_i1728]); + xfer += iprot->readString((*(this->success))[_i1746]); } xfer += iprot->readListEnd(); } @@ -29640,14 +30523,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1729; - ::apache::thrift::protocol::TType _etype1732; - xfer += iprot->readListBegin(_etype1732, _size1729); - this->success.resize(_size1729); - uint32_t _i1733; - for (_i1733 = 0; _i1733 < _size1729; ++_i1733) + uint32_t _size1747; + ::apache::thrift::protocol::TType _etype1750; + xfer += iprot->readListBegin(_etype1750, _size1747); + this->success.resize(_size1747); + uint32_t _i1751; + for (_i1751 = 0; _i1751 < _size1747; ++_i1751) { - xfer += iprot->readString(this->success[_i1733]); + xfer += iprot->readString(this->success[_i1751]); } xfer += iprot->readListEnd(); } @@ -29686,10 +30569,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1734; - for (_iter1734 = this->success.begin(); _iter1734 != this->success.end(); ++_iter1734) + std::vector ::const_iterator _iter1752; + for (_iter1752 = this->success.begin(); _iter1752 != this->success.end(); ++_iter1752) { - xfer += oprot->writeString((*_iter1734)); + xfer += oprot->writeString((*_iter1752)); } xfer += oprot->writeListEnd(); } @@ -29734,14 +30617,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1735; - ::apache::thrift::protocol::TType _etype1738; - xfer += 
iprot->readListBegin(_etype1738, _size1735); - (*(this->success)).resize(_size1735); - uint32_t _i1739; - for (_i1739 = 0; _i1739 < _size1735; ++_i1739) + uint32_t _size1753; + ::apache::thrift::protocol::TType _etype1756; + xfer += iprot->readListBegin(_etype1756, _size1753); + (*(this->success)).resize(_size1753); + uint32_t _i1757; + for (_i1757 = 0; _i1757 < _size1753; ++_i1757) { - xfer += iprot->readString((*(this->success))[_i1739]); + xfer += iprot->readString((*(this->success))[_i1757]); } xfer += iprot->readListEnd(); } @@ -29814,9 +30697,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1740; - xfer += iprot->readI32(ecast1740); - this->principal_type = (PrincipalType::type)ecast1740; + int32_t ecast1758; + xfer += iprot->readI32(ecast1758); + this->principal_type = (PrincipalType::type)ecast1758; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29832,9 +30715,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1741; - xfer += iprot->readI32(ecast1741); - this->grantorType = (PrincipalType::type)ecast1741; + int32_t ecast1759; + xfer += iprot->readI32(ecast1759); + this->grantorType = (PrincipalType::type)ecast1759; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -30105,9 +30988,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1742; - xfer += iprot->readI32(ecast1742); - this->principal_type = (PrincipalType::type)ecast1742; + int32_t ecast1760; + xfer += iprot->readI32(ecast1760); + this->principal_type = (PrincipalType::type)ecast1760; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30338,9 +31221,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1743; - xfer += iprot->readI32(ecast1743); - this->principal_type = (PrincipalType::type)ecast1743; + int32_t ecast1761; + xfer += iprot->readI32(ecast1761); + this->principal_type = (PrincipalType::type)ecast1761; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -30429,14 +31312,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1744; - ::apache::thrift::protocol::TType _etype1747; - xfer += iprot->readListBegin(_etype1747, _size1744); - this->success.resize(_size1744); - uint32_t _i1748; - for (_i1748 = 0; _i1748 < _size1744; ++_i1748) + uint32_t _size1762; + ::apache::thrift::protocol::TType _etype1765; + xfer += iprot->readListBegin(_etype1765, _size1762); + this->success.resize(_size1762); + uint32_t _i1766; + for (_i1766 = 0; _i1766 < _size1762; ++_i1766) { - xfer += this->success[_i1748].read(iprot); + xfer += this->success[_i1766].read(iprot); } xfer += iprot->readListEnd(); } @@ -30475,10 +31358,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1749; - for (_iter1749 = 
this->success.begin(); _iter1749 != this->success.end(); ++_iter1749) + std::vector ::const_iterator _iter1767; + for (_iter1767 = this->success.begin(); _iter1767 != this->success.end(); ++_iter1767) { - xfer += (*_iter1749).write(oprot); + xfer += (*_iter1767).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30523,14 +31406,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1750; - ::apache::thrift::protocol::TType _etype1753; - xfer += iprot->readListBegin(_etype1753, _size1750); - (*(this->success)).resize(_size1750); - uint32_t _i1754; - for (_i1754 = 0; _i1754 < _size1750; ++_i1754) + uint32_t _size1768; + ::apache::thrift::protocol::TType _etype1771; + xfer += iprot->readListBegin(_etype1771, _size1768); + (*(this->success)).resize(_size1768); + uint32_t _i1772; + for (_i1772 = 0; _i1772 < _size1768; ++_i1772) { - xfer += (*(this->success))[_i1754].read(iprot); + xfer += (*(this->success))[_i1772].read(iprot); } xfer += iprot->readListEnd(); } @@ -31226,14 +32109,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1755; - ::apache::thrift::protocol::TType _etype1758; - xfer += iprot->readListBegin(_etype1758, _size1755); - this->group_names.resize(_size1755); - uint32_t _i1759; - for (_i1759 = 0; _i1759 < _size1755; ++_i1759) + uint32_t _size1773; + ::apache::thrift::protocol::TType _etype1776; + xfer += iprot->readListBegin(_etype1776, _size1773); + this->group_names.resize(_size1773); + uint32_t _i1777; + for (_i1777 = 0; _i1777 < _size1773; ++_i1777) { - xfer += iprot->readString(this->group_names[_i1759]); + xfer += iprot->readString(this->group_names[_i1777]); } xfer += iprot->readListEnd(); } @@ -31270,10 +32153,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1760; - for (_iter1760 = this->group_names.begin(); _iter1760 != this->group_names.end(); ++_iter1760) + std::vector ::const_iterator _iter1778; + for (_iter1778 = this->group_names.begin(); _iter1778 != this->group_names.end(); ++_iter1778) { - xfer += oprot->writeString((*_iter1760)); + xfer += oprot->writeString((*_iter1778)); } xfer += oprot->writeListEnd(); } @@ -31305,10 +32188,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1761; - for (_iter1761 = (*(this->group_names)).begin(); _iter1761 != (*(this->group_names)).end(); ++_iter1761) + std::vector ::const_iterator _iter1779; + for (_iter1779 = (*(this->group_names)).begin(); _iter1779 != (*(this->group_names)).end(); ++_iter1779) { - xfer += oprot->writeString((*_iter1761)); + xfer += oprot->writeString((*_iter1779)); } xfer += oprot->writeListEnd(); } @@ -31483,9 +32366,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1762; - xfer += 
iprot->readI32(ecast1762); - this->principal_type = (PrincipalType::type)ecast1762; + int32_t ecast1780; + xfer += iprot->readI32(ecast1780); + this->principal_type = (PrincipalType::type)ecast1780; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31590,14 +32473,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1763; - ::apache::thrift::protocol::TType _etype1766; - xfer += iprot->readListBegin(_etype1766, _size1763); - this->success.resize(_size1763); - uint32_t _i1767; - for (_i1767 = 0; _i1767 < _size1763; ++_i1767) + uint32_t _size1781; + ::apache::thrift::protocol::TType _etype1784; + xfer += iprot->readListBegin(_etype1784, _size1781); + this->success.resize(_size1781); + uint32_t _i1785; + for (_i1785 = 0; _i1785 < _size1781; ++_i1785) { - xfer += this->success[_i1767].read(iprot); + xfer += this->success[_i1785].read(iprot); } xfer += iprot->readListEnd(); } @@ -31636,10 +32519,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1768; - for (_iter1768 = this->success.begin(); _iter1768 != this->success.end(); ++_iter1768) + std::vector ::const_iterator _iter1786; + for (_iter1786 = this->success.begin(); _iter1786 != this->success.end(); ++_iter1786) { - xfer += (*_iter1768).write(oprot); + xfer += (*_iter1786).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31684,14 +32567,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1769; - ::apache::thrift::protocol::TType _etype1772; - xfer += iprot->readListBegin(_etype1772, _size1769); - (*(this->success)).resize(_size1769); - uint32_t _i1773; - for (_i1773 = 0; _i1773 < _size1769; ++_i1773) + uint32_t _size1787; + ::apache::thrift::protocol::TType _etype1790; + xfer += iprot->readListBegin(_etype1790, _size1787); + (*(this->success)).resize(_size1787); + uint32_t _i1791; + for (_i1791 = 0; _i1791 < _size1787; ++_i1791) { - xfer += (*(this->success))[_i1773].read(iprot); + xfer += (*(this->success))[_i1791].read(iprot); } xfer += iprot->readListEnd(); } @@ -32379,14 +33262,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1774; - ::apache::thrift::protocol::TType _etype1777; - xfer += iprot->readListBegin(_etype1777, _size1774); - this->group_names.resize(_size1774); - uint32_t _i1778; - for (_i1778 = 0; _i1778 < _size1774; ++_i1778) + uint32_t _size1792; + ::apache::thrift::protocol::TType _etype1795; + xfer += iprot->readListBegin(_etype1795, _size1792); + this->group_names.resize(_size1792); + uint32_t _i1796; + for (_i1796 = 0; _i1796 < _size1792; ++_i1796) { - xfer += iprot->readString(this->group_names[_i1778]); + xfer += iprot->readString(this->group_names[_i1796]); } xfer += iprot->readListEnd(); } @@ -32419,10 +33302,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1779; - for (_iter1779 = this->group_names.begin(); _iter1779 != this->group_names.end(); ++_iter1779) + std::vector ::const_iterator _iter1797; + for (_iter1797 = this->group_names.begin(); _iter1797 != this->group_names.end(); ++_iter1797) { - xfer += oprot->writeString((*_iter1779)); + xfer += oprot->writeString((*_iter1797)); } xfer += oprot->writeListEnd(); } @@ -32450,10 +33333,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1780; - for (_iter1780 = (*(this->group_names)).begin(); _iter1780 != (*(this->group_names)).end(); ++_iter1780) + std::vector ::const_iterator _iter1798; + for (_iter1798 = (*(this->group_names)).begin(); _iter1798 != (*(this->group_names)).end(); ++_iter1798) { - xfer += oprot->writeString((*_iter1780)); + xfer += oprot->writeString((*_iter1798)); } xfer += oprot->writeListEnd(); } @@ -32494,14 +33377,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1781; - ::apache::thrift::protocol::TType _etype1784; - xfer += iprot->readListBegin(_etype1784, _size1781); - this->success.resize(_size1781); - uint32_t _i1785; - for (_i1785 = 0; _i1785 < _size1781; ++_i1785) + uint32_t _size1799; + ::apache::thrift::protocol::TType _etype1802; + xfer += iprot->readListBegin(_etype1802, _size1799); + this->success.resize(_size1799); + uint32_t _i1803; + for (_i1803 = 0; _i1803 < _size1799; ++_i1803) { - xfer += iprot->readString(this->success[_i1785]); + xfer += iprot->readString(this->success[_i1803]); } xfer += iprot->readListEnd(); } @@ -32540,10 +33423,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1786; - for (_iter1786 = this->success.begin(); _iter1786 != this->success.end(); ++_iter1786) + std::vector ::const_iterator _iter1804; + for (_iter1804 = this->success.begin(); _iter1804 != this->success.end(); ++_iter1804) { - xfer += oprot->writeString((*_iter1786)); + xfer += oprot->writeString((*_iter1804)); } xfer += oprot->writeListEnd(); } @@ -32588,14 +33471,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1787; - ::apache::thrift::protocol::TType _etype1790; - xfer += iprot->readListBegin(_etype1790, _size1787); - (*(this->success)).resize(_size1787); - uint32_t _i1791; - for (_i1791 = 0; _i1791 < _size1787; ++_i1791) + uint32_t _size1805; + ::apache::thrift::protocol::TType _etype1808; + xfer += iprot->readListBegin(_etype1808, _size1805); + (*(this->success)).resize(_size1805); + uint32_t _i1809; + for (_i1809 = 0; _i1809 < _size1805; ++_i1809) { - xfer += iprot->readString((*(this->success))[_i1791]); + xfer += iprot->readString((*(this->success))[_i1809]); } xfer += iprot->readListEnd(); } @@ -33906,14 +34789,14 @@ uint32_t 
ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1792; - ::apache::thrift::protocol::TType _etype1795; - xfer += iprot->readListBegin(_etype1795, _size1792); - this->success.resize(_size1792); - uint32_t _i1796; - for (_i1796 = 0; _i1796 < _size1792; ++_i1796) + uint32_t _size1810; + ::apache::thrift::protocol::TType _etype1813; + xfer += iprot->readListBegin(_etype1813, _size1810); + this->success.resize(_size1810); + uint32_t _i1814; + for (_i1814 = 0; _i1814 < _size1810; ++_i1814) { - xfer += iprot->readString(this->success[_i1796]); + xfer += iprot->readString(this->success[_i1814]); } xfer += iprot->readListEnd(); } @@ -33944,10 +34827,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1797; - for (_iter1797 = this->success.begin(); _iter1797 != this->success.end(); ++_iter1797) + std::vector ::const_iterator _iter1815; + for (_iter1815 = this->success.begin(); _iter1815 != this->success.end(); ++_iter1815) { - xfer += oprot->writeString((*_iter1797)); + xfer += oprot->writeString((*_iter1815)); } xfer += oprot->writeListEnd(); } @@ -33988,14 +34871,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1798; - ::apache::thrift::protocol::TType _etype1801; - xfer += iprot->readListBegin(_etype1801, _size1798); - (*(this->success)).resize(_size1798); - uint32_t _i1802; - for (_i1802 = 0; _i1802 < _size1798; ++_i1802) + uint32_t _size1816; + ::apache::thrift::protocol::TType _etype1819; + xfer += iprot->readListBegin(_etype1819, _size1816); + (*(this->success)).resize(_size1816); + uint32_t _i1820; + for (_i1820 = 0; _i1820 < _size1816; ++_i1820) { - xfer += iprot->readString((*(this->success))[_i1802]); + xfer += iprot->readString((*(this->success))[_i1820]); } xfer += iprot->readListEnd(); } @@ -34721,14 +35604,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1803; - ::apache::thrift::protocol::TType _etype1806; - xfer += iprot->readListBegin(_etype1806, _size1803); - this->success.resize(_size1803); - uint32_t _i1807; - for (_i1807 = 0; _i1807 < _size1803; ++_i1807) + uint32_t _size1821; + ::apache::thrift::protocol::TType _etype1824; + xfer += iprot->readListBegin(_etype1824, _size1821); + this->success.resize(_size1821); + uint32_t _i1825; + for (_i1825 = 0; _i1825 < _size1821; ++_i1825) { - xfer += iprot->readString(this->success[_i1807]); + xfer += iprot->readString(this->success[_i1825]); } xfer += iprot->readListEnd(); } @@ -34759,10 +35642,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1808; - for (_iter1808 = this->success.begin(); _iter1808 != this->success.end(); ++_iter1808) + std::vector ::const_iterator _iter1826; + for (_iter1826 = this->success.begin(); _iter1826 != this->success.end(); 
++_iter1826) { - xfer += oprot->writeString((*_iter1808)); + xfer += oprot->writeString((*_iter1826)); } xfer += oprot->writeListEnd(); } @@ -34803,14 +35686,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1809; - ::apache::thrift::protocol::TType _etype1812; - xfer += iprot->readListBegin(_etype1812, _size1809); - (*(this->success)).resize(_size1809); - uint32_t _i1813; - for (_i1813 = 0; _i1813 < _size1809; ++_i1813) + uint32_t _size1827; + ::apache::thrift::protocol::TType _etype1830; + xfer += iprot->readListBegin(_etype1830, _size1827); + (*(this->success)).resize(_size1827); + uint32_t _i1831; + for (_i1831 = 0; _i1831 < _size1827; ++_i1831) { - xfer += iprot->readString((*(this->success))[_i1813]); + xfer += iprot->readString((*(this->success))[_i1831]); } xfer += iprot->readListEnd(); } @@ -46451,14 +47334,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1814; - ::apache::thrift::protocol::TType _etype1817; - xfer += iprot->readListBegin(_etype1817, _size1814); - this->success.resize(_size1814); - uint32_t _i1818; - for (_i1818 = 0; _i1818 < _size1814; ++_i1818) + uint32_t _size1832; + ::apache::thrift::protocol::TType _etype1835; + xfer += iprot->readListBegin(_etype1835, _size1832); + this->success.resize(_size1832); + uint32_t _i1836; + for (_i1836 = 0; _i1836 < _size1832; ++_i1836) { - xfer += this->success[_i1818].read(iprot); + xfer += this->success[_i1836].read(iprot); } xfer += iprot->readListEnd(); } @@ -46505,10 +47388,10 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1819; - for (_iter1819 = this->success.begin(); _iter1819 != this->success.end(); ++_iter1819) + std::vector ::const_iterator _iter1837; + for (_iter1837 = this->success.begin(); _iter1837 != this->success.end(); ++_iter1837) { - xfer += (*_iter1819).write(oprot); + xfer += (*_iter1837).write(oprot); } xfer += oprot->writeListEnd(); } @@ -46557,14 +47440,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1820; - ::apache::thrift::protocol::TType _etype1823; - xfer += iprot->readListBegin(_etype1823, _size1820); - (*(this->success)).resize(_size1820); - uint32_t _i1824; - for (_i1824 = 0; _i1824 < _size1820; ++_i1824) + uint32_t _size1838; + ::apache::thrift::protocol::TType _etype1841; + xfer += iprot->readListBegin(_etype1841, _size1838); + (*(this->success)).resize(_size1838); + uint32_t _i1842; + for (_i1842 = 0; _i1842 < _size1838; ++_i1842) { - xfer += (*(this->success))[_i1824].read(iprot); + xfer += (*(this->success))[_i1842].read(iprot); } xfer += iprot->readListEnd(); } @@ -48001,6 +48884,254 @@ void ThriftHiveMetastoreClient::recv_setMetaConf() return; } +void ThriftHiveMetastoreClient::create_catalog(const CreateCatalogRequest& catalog) +{ + send_create_catalog(catalog); + recv_create_catalog(); +} + +void ThriftHiveMetastoreClient::send_create_catalog(const CreateCatalogRequest& catalog) +{ + int32_t cseqid = 0; + 
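
The long run of hunks above is pure generator churn: adding the catalog calls shifted the Thrift code generator's counter, so every scratch variable is renumbered (_iter1656 becomes _iter1674, _size1665 becomes _size1683, ecast1705 becomes ecast1723, and so on) while the serialization logic is untouched. For orientation, the pattern behind those hunks looks roughly like the following sketch, with the template arguments that this rendering of the diff drops (e.g. std::vector<std::string>::const_iterator, static_cast<uint32_t>) restored; it assumes the Apache Thrift C++ runtime and the generated hive_metastore_types.h are on the include path, and the exact numeric suffixes vary per build:

#include <thrift/protocol/TProtocol.h>
#include <string>
#include <vector>
#include "hive_metastore_types.h"   // for PartitionEventType

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;
using apache::thrift::protocol::T_LIST;
using apache::thrift::protocol::T_STRING;
using Apache::Hadoop::Hive::PartitionEventType;

// Generated write loop for a list<string> field (cf. the part_vals hunks).
uint32_t writePartVals(TProtocol* oprot, const std::vector<std::string>& part_vals) {
  uint32_t xfer = 0;
  xfer += oprot->writeFieldBegin("part_vals", T_LIST, 3);
  xfer += oprot->writeListBegin(T_STRING, static_cast<uint32_t>(part_vals.size()));
  // The generator names this iterator e.g. _iter1681; the hunks above only renumber it.
  for (std::vector<std::string>::const_iterator it = part_vals.begin();
       it != part_vals.end(); ++it) {
    xfer += oprot->writeString(*it);
  }
  xfer += oprot->writeListEnd();
  xfer += oprot->writeFieldEnd();
  return xfer;
}

// Generated read loop (cf. _size1676 / _etype1679 / _i1680 above).
uint32_t readPartVals(TProtocol* iprot, std::vector<std::string>& part_vals) {
  uint32_t xfer = 0;
  part_vals.clear();
  uint32_t size;   // generator: _size1676
  TType etype;     // generator: _etype1679
  xfer += iprot->readListBegin(etype, size);
  part_vals.resize(size);
  for (uint32_t i = 0; i < size; ++i) {   // generator: _i1680
    xfer += iprot->readString(part_vals[i]);
  }
  xfer += iprot->readListEnd();
  return xfer;
}

// The ecastNNNN hunks are the same story for enum fields: read an i32,
// then cast to the enum type.
uint32_t readEventType(TProtocol* iprot, PartitionEventType::type& eventType) {
  int32_t ecast;   // generator: ecast1723, ecast1733, ...
  uint32_t xfer = iprot->readI32(ecast);
  eventType = (PartitionEventType::type)ecast;
  return xfer;
}
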
oprot_->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_catalog_pargs args; + args.catalog = &catalog; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_catalog() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_create_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) +{ + send_get_catalog(catName); + recv_get_catalog(_return); +} + +void ThriftHiveMetastoreClient::send_get_catalog(const GetCatalogRequest& catName) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_catalog(GetCatalogResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_catalog_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalog failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_catalogs(GetCatalogsResponse& _return) +{ + send_get_catalogs(); + recv_get_catalogs(_return); +} + +void ThriftHiveMetastoreClient::send_get_catalogs() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_CALL, cseqid); + + 
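
These are the four new client stubs for the catalog API, each following the generated send_/recv_ split: send_ serializes a pargs struct holding pointers to the caller's arguments, recv_ reads the presult and rethrows any declared exception whose __isset flag is set. A hypothetical caller-side sketch, assuming the request/response structs added elsewhere in this patch, their field names (catalog, name, locationUri), the usual __set_ setters the Thrift C++ generator emits, and a Thrift 0.9-era boost::shared_ptr transport stack:

#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;   // namespace per the generated headers

int main() {
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  // Create a catalog, then read it back. AlreadyExistsException,
  // InvalidObjectException and MetaException surface as C++ exceptions,
  // matching the __isset.o1/o2/o3 checks in recv_create_catalog().
  Catalog cat;
  cat.__set_name("test_cat");                       // field names assumed
  cat.__set_locationUri("/warehouse/test_cat");
  CreateCatalogRequest createReq;
  createReq.__set_catalog(cat);
  client.create_catalog(createReq);

  GetCatalogRequest getReq;
  getReq.__set_name("test_cat");
  GetCatalogResponse getResp;
  client.get_catalog(getResp, getReq);

  GetCatalogsResponse allCats;
  client.get_catalogs(allCats);

  transport->close();
  return 0;
}
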
ThriftHiveMetastore_get_catalogs_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_catalogs(GetCatalogsResponse& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalogs") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_catalogs_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalogs failed: unknown result"); +} + +void ThriftHiveMetastoreClient::drop_catalog(const DropCatalogRequest& catName) +{ + send_drop_catalog(catName); + recv_drop_catalog(); +} + +void ThriftHiveMetastoreClient::send_drop_catalog(const DropCatalogRequest& catName) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_catalog() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + void ThriftHiveMetastoreClient::create_database(const Database& database) { send_create_database(database); @@ -50331,18 +51462,19 @@ void ThriftHiveMetastoreClient::recv_get_materialization_invalidation_info(std:: throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result"); } -void ThriftHiveMetastoreClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) 
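
The hunk here swaps the three-argument update_creation_metadata for a four-argument form that takes the catalog first, the same catalog-threading applied throughout this patch: existing call sites must now name a catalog explicitly. Continuing the client sketch above, the before/after from a caller's perspective (a hypothetical materialized view "mv1" in database "db1"; "hive" is the default catalog name this patch falls back to, Warehouse.DEFAULT_CATALOG_NAME):

CreationMetadata cm;   // populated by the caller with the view's source tables
// Pre-patch call sites:
//   client.update_creation_metadata("db1", "mv1", cm);
// Post-patch, the catalog comes first:
client.update_creation_metadata("hive", "db1", "mv1", cm);
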
+void ThriftHiveMetastoreClient::update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { - send_update_creation_metadata(dbname, tbl_name, creation_metadata); + send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata); recv_update_creation_metadata(); } -void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { int32_t cseqid = 0; oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_update_creation_metadata_pargs args; + args.catName = &catName; args.dbname = &dbname; args.tbl_name = &tbl_name; args.creation_metadata = &creation_metadata; @@ -60423,6 +61555,247 @@ void ThriftHiveMetastoreProcessor::process_setMetaConf(int32_t seqid, ::apache:: } } +void ThriftHiveMetastoreProcessor::process_create_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_catalog", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_catalog"); + } + + ThriftHiveMetastore_create_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_catalog", bytes); + } + + ThriftHiveMetastore_create_catalog_result result; + try { + iface_->create_catalog(args.catalog); + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_catalog"); + } + + oprot->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_catalog", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.get_catalog", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_catalog"); + } + + ThriftHiveMetastore_get_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_catalog", bytes); + } + + ThriftHiveMetastore_get_catalog_result result; + try { + iface_->get_catalog(result.success, args.catName); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_catalog"); + } + + oprot->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_catalog", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_catalogs(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_catalogs", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_catalogs"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + ThriftHiveMetastore_get_catalogs_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_catalogs", bytes); + } + + ThriftHiveMetastore_get_catalogs_result result; + try { + iface_->get_catalogs(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + oprot->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_REPLY, seqid); + 
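
Each process_* method added here follows the generator's fixed recipe: deserialize the args struct, call through the ThriftHiveMetastoreIf handler, copy each declared exception into the result struct and set its matching __isset flag, and wrap anything undeclared in a TApplicationException sent as T_EXCEPTION. A fragmentary handler sketch for the catalog read path (hypothetical and not compilable as-is, since the interface declares many more pure-virtual methods; the real metastore implements this logic in the Java HMSHandler, not in C++):

// Illustrative fragment only: a compilable subclass must implement every
// pure-virtual method of ThriftHiveMetastoreIf, omitted here for brevity.
class CatalogOnlyHandler : virtual public ThriftHiveMetastoreIf {
 public:
  void get_catalogs(GetCatalogsResponse& _return) {
    // 'names' is the assumed list field of GetCatalogsResponse per the new IDL.
    std::vector<std::string> names;
    names.push_back("hive");
    _return.__set_names(names);
  }

  void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) {
    NoSuchObjectException e;
    e.__set_message("only the default catalog exists in this sketch");
    throw e;   // process_get_catalog() above turns this into result.o1
  }
  // ... remaining ThriftHiveMetastoreIf methods omitted ...
};
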
result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_catalogs", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_catalog", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + ThriftHiveMetastore_drop_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_catalog", bytes); + } + + ThriftHiveMetastore_drop_catalog_result result; + try { + iface_->drop_catalog(args.catName); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + oprot->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_catalog", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -62662,7 +64035,7 @@ void ThriftHiveMetastoreProcessor::process_update_creation_metadata(int32_t seqi ThriftHiveMetastore_update_creation_metadata_result result; try { - iface_->update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + iface_->update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata); } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; @@ -72155,6 +73528,365 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::create_catalog(const CreateCatalogRequest& catalog) +{ + int32_t seqid = send_create_catalog(catalog); + recv_create_catalog(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_create_catalog(const CreateCatalogRequest& catalog) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry 
sentry(&this->sync_); + oprot_->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_catalog_pargs args; + args.catalog = &catalog; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_create_catalog(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_create_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) +{ + int32_t seqid = send_get_catalog(catName); + recv_get_catalog(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_catalog(const GetCatalogRequest& catName) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_catalog(GetCatalogResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { 
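
The concurrent-client variants being added here differ from the plain client in one way: many threads share one connection, so each call records the seqid it sent, and a receiver that pulls a frame belonging to a different call parks it via sync_.updatePending() and blocks in sync_.waitForWork() rather than discarding it. A simplified, Thrift-free, single-threaded model of that dispatch loop (the real code additionally guards the pending state with a mutex and wakes waiters through the TConcurrentRecvSentry destructor):

#include <cstdint>
#include <deque>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// seqid -> reply parked by a reader that pulled someone else's frame
// (stand-in for TConcurrentClientSyncInfo's pending bookkeeping).
std::map<int32_t, std::string> pending;

std::string receiveFor(int32_t mySeqid,
                       std::deque<std::pair<int32_t, std::string> >& wire) {
  while (true) {
    // First check frames another caller already read off the wire for us
    // (cf. sync_.getPending(...)).
    std::map<int32_t, std::string>::iterator hit = pending.find(mySeqid);
    if (hit != pending.end()) {
      std::string reply = hit->second;
      pending.erase(hit);
      return reply;
    }
    // Otherwise read the next frame; keep it or park it.
    std::pair<int32_t, std::string> frame = wire.front();
    wire.pop_front();
    if (frame.first == mySeqid) {        // cf. if(seqid == rseqid)
      return frame.second;
    }
    pending[frame.first] = frame.second; // cf. sync_.updatePending(...)
  }
}

int main() {
  // Replies arrive out of order relative to the callers waiting on them.
  std::deque<std::pair<int32_t, std::string> > wire;
  wire.push_back(std::make_pair(2, "reply for call 2"));
  wire.push_back(std::make_pair(1, "reply for call 1"));
  std::cout << receiveFor(1, wire) << "\n";   // parks seqid 2, then finds 1
  std::cout << receiveFor(2, wire) << "\n";   // served from the pending map
  return 0;
}
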
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_catalog_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalog failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_catalogs(GetCatalogsResponse& _return) +{ + int32_t seqid = send_get_catalogs(); + recv_get_catalogs(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_catalogs() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_catalogs_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_catalogs(GetCatalogsResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalogs") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_catalogs_presult result; + result.success 
= &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalogs failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::drop_catalog(const DropCatalogRequest& catName) +{ + int32_t seqid = send_drop_catalog(catName); + recv_drop_catalog(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_catalog(const DropCatalogRequest& catName) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_drop_catalog(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) { int32_t seqid = send_create_database(database); @@ -75509,19 +77241,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_materialization_invalidation_ } // end while(true) } -void 
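// Every ConcurrentClient method added above (get_catalog, get_catalogs,
// drop_catalog) follows the same demultiplexing pattern: send_* stamps the
// request with a fresh sequence id from sync_.generateSeqId(), and recv_*
// loops until the reply bearing its own seqid arrives, parking replies meant
// for other threads via updatePending()/waitForWork(). That is what makes one
// connection shareable across threads. A usage sketch (requires <thread>;
// construction of the client over a shared protocol is omitted):

static void exampleConcurrentCatalogCalls(ThriftHiveMetastoreConcurrentClient& client) {
  std::thread lister([&client] {
    GetCatalogsResponse resp;
    client.get_catalogs(resp);  // blocks only until *its* reply arrives
  });
  std::thread dropper([&client] {
    DropCatalogRequest req;     // hypothetical request; its fields are not shown here
    client.drop_catalog(req);   // interleaves safely on the same transport
  });
  lister.join();
  dropper.join();
}
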
ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { - int32_t seqid = send_update_creation_metadata(dbname, tbl_name, creation_metadata); + int32_t seqid = send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata); recv_update_creation_metadata(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_update_creation_metadata_pargs args; + args.catName = &catName; args.dbname = &dbname; args.tbl_name = &tbl_name; args.creation_metadata = &creation_metadata; diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index e10a655e76..b9e8e24b63 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -24,6 +24,10 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual ~ThriftHiveMetastoreIf() {} virtual void getMetaConf(std::string& _return, const std::string& key) = 0; virtual void setMetaConf(const std::string& key, const std::string& value) = 0; + virtual void create_catalog(const CreateCatalogRequest& catalog) = 0; + virtual void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) = 0; + virtual void get_catalogs(GetCatalogsResponse& _return) = 0; + virtual void drop_catalog(const DropCatalogRequest& catName) = 0; virtual void create_database(const Database& database) = 0; virtual void get_database(Database& _return, const std::string& name) = 0; virtual void drop_database(const std::string& name, const bool deleteData, const bool cascade) = 0; @@ -61,7 +65,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0; virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0; virtual void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names) = 0; - virtual void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0; + virtual void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0; virtual void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0; virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0; virtual void alter_table_with_environment_context(const std::string& 
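// update_creation_metadata() now takes the catalog name as its first
// argument, so every existing 3-argument caller must be migrated. A sketch of
// the updated call, assuming "hive" is the default catalog name (an
// assumption; the constant is defined on the Java side of this patch):

static void exampleUpdateCreationMetadata(ThriftHiveMetastoreClient& client,
                                          const CreationMetadata& cm) {
  // old: client.update_creation_metadata("db", "mv", cm);
  client.update_creation_metadata("hive", "db", "mv", cm);  // catalog first
}
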
dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0; @@ -253,6 +257,18 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void setMetaConf(const std::string& /* key */, const std::string& /* value */) { return; } + void create_catalog(const CreateCatalogRequest& /* catalog */) { + return; + } + void get_catalog(GetCatalogResponse& /* _return */, const GetCatalogRequest& /* catName */) { + return; + } + void get_catalogs(GetCatalogsResponse& /* _return */) { + return; + } + void drop_catalog(const DropCatalogRequest& /* catName */) { + return; + } void create_database(const Database& /* database */) { return; } @@ -366,7 +382,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_materialization_invalidation_info(std::map & /* _return */, const std::string& /* dbname */, const std::vector & /* tbl_names */) { return; } - void update_creation_metadata(const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) { + void update_creation_metadata(const std::string& /* catName */, const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) { return; } void get_table_names_by_filter(std::vector & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) { @@ -1087,6 +1103,466 @@ class ThriftHiveMetastore_setMetaConf_presult { }; +typedef struct _ThriftHiveMetastore_create_catalog_args__isset { + _ThriftHiveMetastore_create_catalog_args__isset() : catalog(false) {} + bool catalog :1; +} _ThriftHiveMetastore_create_catalog_args__isset; + +class ThriftHiveMetastore_create_catalog_args { + public: + + ThriftHiveMetastore_create_catalog_args(const ThriftHiveMetastore_create_catalog_args&); + ThriftHiveMetastore_create_catalog_args& operator=(const ThriftHiveMetastore_create_catalog_args&); + ThriftHiveMetastore_create_catalog_args() { + } + + virtual ~ThriftHiveMetastore_create_catalog_args() throw(); + CreateCatalogRequest catalog; + + _ThriftHiveMetastore_create_catalog_args__isset __isset; + + void __set_catalog(const CreateCatalogRequest& val); + + bool operator == (const ThriftHiveMetastore_create_catalog_args & rhs) const + { + if (!(catalog == rhs.catalog)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_catalog_pargs() throw(); + const CreateCatalogRequest* catalog; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_catalog_result__isset { + _ThriftHiveMetastore_create_catalog_result__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_create_catalog_result__isset; + +class ThriftHiveMetastore_create_catalog_result { + public: + + ThriftHiveMetastore_create_catalog_result(const ThriftHiveMetastore_create_catalog_result&); + ThriftHiveMetastore_create_catalog_result& operator=(const ThriftHiveMetastore_create_catalog_result&); + 
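// Each new RPC generates the usual four helper structs: *_args (server side,
// owns its values), *_pargs (client side, holds const pointers so sending
// never copies the request), *_result (server-side reply union), and
// *_presult (client side, writes success through a pointer into the caller's
// _return). The send path only ever touches pargs; a shape sketch:

static void examplePargsShape(const CreateCatalogRequest& request) {
  ThriftHiveMetastore_create_catalog_pargs pargs;
  pargs.catalog = &request;  // borrowed pointer; request must outlive write()
  // pargs.write(oprot) would then serialize it, as send_create_catalog does
}
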
ThriftHiveMetastore_create_catalog_result() { + } + + virtual ~ThriftHiveMetastore_create_catalog_result() throw(); + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + + _ThriftHiveMetastore_create_catalog_result__isset __isset; + + void __set_o1(const AlreadyExistsException& val); + + void __set_o2(const InvalidObjectException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_create_catalog_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_catalog_presult__isset { + _ThriftHiveMetastore_create_catalog_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_create_catalog_presult__isset; + +class ThriftHiveMetastore_create_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_create_catalog_presult() throw(); + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + + _ThriftHiveMetastore_create_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_args__isset { + _ThriftHiveMetastore_get_catalog_args__isset() : catName(false) {} + bool catName :1; +} _ThriftHiveMetastore_get_catalog_args__isset; + +class ThriftHiveMetastore_get_catalog_args { + public: + + ThriftHiveMetastore_get_catalog_args(const ThriftHiveMetastore_get_catalog_args&); + ThriftHiveMetastore_get_catalog_args& operator=(const ThriftHiveMetastore_get_catalog_args&); + ThriftHiveMetastore_get_catalog_args() { + } + + virtual ~ThriftHiveMetastore_get_catalog_args() throw(); + GetCatalogRequest catName; + + _ThriftHiveMetastore_get_catalog_args__isset __isset; + + void __set_catName(const GetCatalogRequest& val); + + bool operator == (const ThriftHiveMetastore_get_catalog_args & rhs) const + { + if (!(catName == rhs.catName)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_catalog_pargs() throw(); + const GetCatalogRequest* catName; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_result__isset { + _ThriftHiveMetastore_get_catalog_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_catalog_result__isset; + +class ThriftHiveMetastore_get_catalog_result { + public: + + ThriftHiveMetastore_get_catalog_result(const ThriftHiveMetastore_get_catalog_result&); + ThriftHiveMetastore_get_catalog_result& operator=(const ThriftHiveMetastore_get_catalog_result&); + ThriftHiveMetastore_get_catalog_result() { + } + + virtual 
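// create_catalog declares three exceptions, which the generated client
// re-throws from the result slots: AlreadyExistsException (o1),
// InvalidObjectException (o2) and MetaException (o3). A caller sketch; how
// CreateCatalogRequest is populated is not shown in this hunk:

static void exampleCreateCatalog(ThriftHiveMetastoreClient& client,
                                 const CreateCatalogRequest& req) {
  try {
    client.create_catalog(req);
  } catch (const AlreadyExistsException&) {
    // a catalog with this name already exists
  } catch (const InvalidObjectException&) {
    // the catalog definition is malformed
  } catch (const MetaException&) {
    // generic metastore failure
  }
}
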
~ThriftHiveMetastore_get_catalog_result() throw(); + GetCatalogResponse success; + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_get_catalog_result__isset __isset; + + void __set_success(const GetCatalogResponse& val); + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_catalog_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_presult__isset { + _ThriftHiveMetastore_get_catalog_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_catalog_presult__isset; + +class ThriftHiveMetastore_get_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_get_catalog_presult() throw(); + GetCatalogResponse* success; + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_get_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHiveMetastore_get_catalogs_args { + public: + + ThriftHiveMetastore_get_catalogs_args(const ThriftHiveMetastore_get_catalogs_args&); + ThriftHiveMetastore_get_catalogs_args& operator=(const ThriftHiveMetastore_get_catalogs_args&); + ThriftHiveMetastore_get_catalogs_args() { + } + + virtual ~ThriftHiveMetastore_get_catalogs_args() throw(); + + bool operator == (const ThriftHiveMetastore_get_catalogs_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalogs_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalogs_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_catalogs_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_catalogs_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalogs_result__isset { + _ThriftHiveMetastore_get_catalogs_result__isset() : success(false), o1(false) {} + bool success :1; + bool o1 :1; +} _ThriftHiveMetastore_get_catalogs_result__isset; + +class ThriftHiveMetastore_get_catalogs_result { + public: + + ThriftHiveMetastore_get_catalogs_result(const ThriftHiveMetastore_get_catalogs_result&); + ThriftHiveMetastore_get_catalogs_result& operator=(const ThriftHiveMetastore_get_catalogs_result&); + ThriftHiveMetastore_get_catalogs_result() { + } + + virtual ~ThriftHiveMetastore_get_catalogs_result() throw(); + GetCatalogsResponse success; + MetaException o1; + + _ThriftHiveMetastore_get_catalogs_result__isset __isset; + + void __set_success(const GetCatalogsResponse& val); + + void __set_o1(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_catalogs_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const 
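// get_catalogs takes no arguments: its _args struct above is empty and its
// operator== is trivially true. The whole catalog list comes back in one
// response. A sketch, assuming GetCatalogsResponse exposes the catalog names
// as a 'names' vector of strings (member name assumed; defined elsewhere in
// this patch):

static void exampleListCatalogs(ThriftHiveMetastoreClient& client) {
  GetCatalogsResponse resp;
  client.get_catalogs(resp);
  for (const std::string& name : resp.names) {  // 'names' is an assumption
    printf("catalog: %s\n", name.c_str());
  }
}
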
ThriftHiveMetastore_get_catalogs_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalogs_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalogs_presult__isset { + _ThriftHiveMetastore_get_catalogs_presult__isset() : success(false), o1(false) {} + bool success :1; + bool o1 :1; +} _ThriftHiveMetastore_get_catalogs_presult__isset; + +class ThriftHiveMetastore_get_catalogs_presult { + public: + + + virtual ~ThriftHiveMetastore_get_catalogs_presult() throw(); + GetCatalogsResponse* success; + MetaException o1; + + _ThriftHiveMetastore_get_catalogs_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_args__isset { + _ThriftHiveMetastore_drop_catalog_args__isset() : catName(false) {} + bool catName :1; +} _ThriftHiveMetastore_drop_catalog_args__isset; + +class ThriftHiveMetastore_drop_catalog_args { + public: + + ThriftHiveMetastore_drop_catalog_args(const ThriftHiveMetastore_drop_catalog_args&); + ThriftHiveMetastore_drop_catalog_args& operator=(const ThriftHiveMetastore_drop_catalog_args&); + ThriftHiveMetastore_drop_catalog_args() { + } + + virtual ~ThriftHiveMetastore_drop_catalog_args() throw(); + DropCatalogRequest catName; + + _ThriftHiveMetastore_drop_catalog_args__isset __isset; + + void __set_catName(const DropCatalogRequest& val); + + bool operator == (const ThriftHiveMetastore_drop_catalog_args & rhs) const + { + if (!(catName == rhs.catName)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_catalog_pargs() throw(); + const DropCatalogRequest* catName; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_result__isset { + _ThriftHiveMetastore_drop_catalog_result__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_catalog_result__isset; + +class ThriftHiveMetastore_drop_catalog_result { + public: + + ThriftHiveMetastore_drop_catalog_result(const ThriftHiveMetastore_drop_catalog_result&); + ThriftHiveMetastore_drop_catalog_result& operator=(const ThriftHiveMetastore_drop_catalog_result&); + ThriftHiveMetastore_drop_catalog_result() { + } + + virtual ~ThriftHiveMetastore_drop_catalog_result() throw(); + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_catalog_result__isset __isset; + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const InvalidOperationException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_drop_catalog_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const 
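// drop_catalog declares NoSuchObjectException (o1), InvalidOperationException
// (o2) and MetaException (o3). o2 plausibly covers refused drops, e.g. a
// catalog that still contains databases (the server-side rule is not shown in
// this generated code). Caller sketch:

static void exampleDropCatalog(ThriftHiveMetastoreClient& client,
                               const DropCatalogRequest& req) {
  try {
    client.drop_catalog(req);
  } catch (const NoSuchObjectException&) {
    // no such catalog
  } catch (const InvalidOperationException&) {
    // drop refused, e.g. catalog not empty (assumed semantics)
  } catch (const MetaException&) {
    // generic metastore failure
  }
}
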
ThriftHiveMetastore_drop_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_presult__isset { + _ThriftHiveMetastore_drop_catalog_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_catalog_presult__isset; + +class ThriftHiveMetastore_drop_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_catalog_presult() throw(); + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_create_database_args__isset { _ThriftHiveMetastore_create_database_args__isset() : database(false) {} bool database :1; @@ -5637,7 +6113,8 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult { }; typedef struct _ThriftHiveMetastore_update_creation_metadata_args__isset { - _ThriftHiveMetastore_update_creation_metadata_args__isset() : dbname(false), tbl_name(false), creation_metadata(false) {} + _ThriftHiveMetastore_update_creation_metadata_args__isset() : catName(false), dbname(false), tbl_name(false), creation_metadata(false) {} + bool catName :1; bool dbname :1; bool tbl_name :1; bool creation_metadata :1; @@ -5648,16 +6125,19 @@ class ThriftHiveMetastore_update_creation_metadata_args { ThriftHiveMetastore_update_creation_metadata_args(const ThriftHiveMetastore_update_creation_metadata_args&); ThriftHiveMetastore_update_creation_metadata_args& operator=(const ThriftHiveMetastore_update_creation_metadata_args&); - ThriftHiveMetastore_update_creation_metadata_args() : dbname(), tbl_name() { + ThriftHiveMetastore_update_creation_metadata_args() : catName(), dbname(), tbl_name() { } virtual ~ThriftHiveMetastore_update_creation_metadata_args() throw(); + std::string catName; std::string dbname; std::string tbl_name; CreationMetadata creation_metadata; _ThriftHiveMetastore_update_creation_metadata_args__isset __isset; + void __set_catName(const std::string& val); + void __set_dbname(const std::string& val); void __set_tbl_name(const std::string& val); @@ -5666,6 +6146,8 @@ class ThriftHiveMetastore_update_creation_metadata_args { bool operator == (const ThriftHiveMetastore_update_creation_metadata_args & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(dbname == rhs.dbname)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -5691,6 +6173,7 @@ class ThriftHiveMetastore_update_creation_metadata_pargs { virtual ~ThriftHiveMetastore_update_creation_metadata_pargs() throw(); + const std::string* catName; const std::string* dbname; const std::string* tbl_name; const CreationMetadata* creation_metadata; @@ -24933,6 +25416,18 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void setMetaConf(const std::string& key, const std::string& value); void send_setMetaConf(const std::string& key, const std::string& value); void recv_setMetaConf(); + void create_catalog(const CreateCatalogRequest& catalog); + void send_create_catalog(const CreateCatalogRequest& catalog); + void recv_create_catalog(); + void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName); + void send_get_catalog(const GetCatalogRequest& catName); + void recv_get_catalog(GetCatalogResponse& _return); + void 
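// Note how adding catName to update_creation_metadata_args also extends its
// __isset companion with a 'bool catName : 1' bit. Every tracked field gets a
// presence bit; read() sets it when the field arrives on the wire, and
// hand-written code should consult it before trusting the value:

static void exampleIssetCheck(const ThriftHiveMetastore_update_creation_metadata_args& args) {
  if (args.__isset.catName) {
    printf("catalog: %s\n", args.catName.c_str());  // peer actually sent it
  } else {
    // older caller: fall back to the default catalog (assumed to be "hive")
    printf("catalog: hive (defaulted)\n");
  }
}
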
get_catalogs(GetCatalogsResponse& _return); + void send_get_catalogs(); + void recv_get_catalogs(GetCatalogsResponse& _return); + void drop_catalog(const DropCatalogRequest& catName); + void send_drop_catalog(const DropCatalogRequest& catName); + void recv_drop_catalog(); void create_database(const Database& database); void send_create_database(const Database& database); void recv_create_database(); @@ -25044,8 +25539,8 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return); - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); - void send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); void recv_update_creation_metadata(); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); @@ -25527,6 +26022,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP ProcessMap processMap_; void process_getMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_setMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_create_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_catalogs(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -25727,6 +26226,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP iface_(iface) { processMap_["getMetaConf"] = &ThriftHiveMetastoreProcessor::process_getMetaConf; processMap_["setMetaConf"] = &ThriftHiveMetastoreProcessor::process_setMetaConf; + processMap_["create_catalog"] = 
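// The plain client also exposes the send_/recv_ halves of each new call,
// which allows simple single-threaded pipelining: flush the request, do local
// work, then block for the reply. Sketch using the declarations above:

static void examplePipelinedGetCatalogs(ThriftHiveMetastoreClient& client) {
  client.send_get_catalogs();       // request is written and flushed
  // ... unrelated local work can happen here ...
  GetCatalogsResponse resp;
  client.recv_get_catalogs(resp);   // now block on the reply
}
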
&ThriftHiveMetastoreProcessor::process_create_catalog; + processMap_["get_catalog"] = &ThriftHiveMetastoreProcessor::process_get_catalog; + processMap_["get_catalogs"] = &ThriftHiveMetastoreProcessor::process_get_catalogs; + processMap_["drop_catalog"] = &ThriftHiveMetastoreProcessor::process_drop_catalog; processMap_["create_database"] = &ThriftHiveMetastoreProcessor::process_create_database; processMap_["get_database"] = &ThriftHiveMetastoreProcessor::process_get_database; processMap_["drop_database"] = &ThriftHiveMetastoreProcessor::process_drop_database; @@ -25973,6 +26476,44 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->setMetaConf(key, value); } + void create_catalog(const CreateCatalogRequest& catalog) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->create_catalog(catalog); + } + ifaces_[i]->create_catalog(catalog); + } + + void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_catalog(_return, catName); + } + ifaces_[i]->get_catalog(_return, catName); + return; + } + + void get_catalogs(GetCatalogsResponse& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_catalogs(_return); + } + ifaces_[i]->get_catalogs(_return); + return; + } + + void drop_catalog(const DropCatalogRequest& catName) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->drop_catalog(catName); + } + ifaces_[i]->drop_catalog(catName); + } + void create_database(const Database& database) { size_t sz = ifaces_.size(); size_t i = 0; @@ -26325,13 +26866,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { size_t sz = ifaces_.size(); size_t i = 0; for (; i < (sz - 1); ++i) { - ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + ifaces_[i]->update_creation_metadata(catName, dbname, tbl_name, creation_metadata); } - ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + ifaces_[i]->update_creation_metadata(catName, dbname, tbl_name, creation_metadata); } void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { @@ -27861,6 +28402,18 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void setMetaConf(const std::string& key, const std::string& value); int32_t send_setMetaConf(const std::string& key, const std::string& value); void recv_setMetaConf(const int32_t seqid); + void create_catalog(const CreateCatalogRequest& catalog); + int32_t send_create_catalog(const CreateCatalogRequest& catalog); + void recv_create_catalog(const int32_t seqid); + void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName); + int32_t send_get_catalog(const GetCatalogRequest& catName); + void recv_get_catalog(GetCatalogResponse& _return, const int32_t seqid); + void get_catalogs(GetCatalogsResponse& _return); + int32_t send_get_catalogs(); + void recv_get_catalogs(GetCatalogsResponse& _return, const int32_t seqid); + void 
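// Two server-side plumbing details worth noting in the additions above:
// 1) ThriftHiveMetastoreProcessor routes by method name: registering
//    "create_catalog" through "drop_catalog" in processMap_ is all the
//    dispatch wiring needed; the generated dispatch looks the name up and
//    calls the matching process_* member through a pointer-to-member, roughly
//    (this->*(it->second))(seqid, iprot, oprot, callContext).
// 2) ThriftHiveMetastoreMultiface fans each call out to every registered
//    iface, but for get_catalog/get_catalogs the shared _return is simply
//    overwritten on each hop, so callers observe only the last iface's answer.
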
drop_catalog(const DropCatalogRequest& catName); + int32_t send_drop_catalog(const DropCatalogRequest& catName); + void recv_drop_catalog(const int32_t seqid); void create_database(const Database& database); int32_t send_create_database(const Database& database); void recv_create_database(const int32_t seqid); @@ -27972,8 +28525,8 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); int32_t send_get_materialization_invalidation_info(const std::string& dbname, const std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return, const int32_t seqid); - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); - int32_t send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + int32_t send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); void recv_update_creation_metadata(const int32_t seqid); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); int32_t send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index d7319e2475..cfec64f96a 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -32,6 +32,26 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("setMetaConf\n"); } + void create_catalog(const CreateCatalogRequest& catalog) { + // Your implementation goes here + printf("create_catalog\n"); + } + + void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) { + // Your implementation goes here + printf("get_catalog\n"); + } + + void get_catalogs(GetCatalogsResponse& _return) { + // Your implementation goes here + printf("get_catalogs\n"); + } + + void drop_catalog(const DropCatalogRequest& catName) { + // Your implementation goes here + printf("drop_catalog\n"); + } + void create_database(const Database& database) { // Your implementation goes here printf("create_database\n"); @@ -217,7 +237,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_materialization_invalidation_info\n"); } - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { // Your implementation goes here printf("update_creation_metadata\n"); } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index b254f6969d..9b28a493c9 100644 --- 
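// The regenerated server skeleton above stubs each new handler with a printf
// placeholder. A minimal illustrative handler, leaning on
// ThriftHiveMetastoreNull so only the catalog calls need bodies (the real
// metastore implements these against its backing store, which is not part of
// the generated code):

class ExampleCatalogHandler : virtual public ThriftHiveMetastoreNull {
 public:
  void get_catalogs(GetCatalogsResponse& _return) {
    // a real handler would populate _return from the backing store
    (void)_return;
    printf("get_catalogs served\n");
  }
  void drop_catalog(const DropCatalogRequest& catName) {
    // a real handler would check the catalog is empty first (assumed rule)
    (void)catName;
    printf("drop_catalog served\n");
  }
};
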
standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -544,6 +544,11 @@ void SQLPrimaryKey::__set_rely_cstr(const bool val) { this->rely_cstr = val; } +void SQLPrimaryKey::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -629,6 +634,14 @@ uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -678,6 +691,11 @@ uint32_t SQLPrimaryKey::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -693,6 +711,7 @@ void swap(SQLPrimaryKey &a, SQLPrimaryKey &b) { swap(a.enable_cstr, b.enable_cstr); swap(a.validate_cstr, b.validate_cstr); swap(a.rely_cstr, b.rely_cstr); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -705,6 +724,7 @@ SQLPrimaryKey::SQLPrimaryKey(const SQLPrimaryKey& other4) { enable_cstr = other4.enable_cstr; validate_cstr = other4.validate_cstr; rely_cstr = other4.rely_cstr; + catName = other4.catName; __isset = other4.__isset; } SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other5) { @@ -716,6 +736,7 @@ SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other5) { enable_cstr = other5.enable_cstr; validate_cstr = other5.validate_cstr; rely_cstr = other5.rely_cstr; + catName = other5.catName; __isset = other5.__isset; return *this; } @@ -730,6 +751,7 @@ void SQLPrimaryKey::printTo(std::ostream& out) const { out << ", " << "enable_cstr=" << to_string(enable_cstr); out << ", " << "validate_cstr=" << to_string(validate_cstr); out << ", " << "rely_cstr=" << to_string(rely_cstr); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -794,6 +816,11 @@ void SQLForeignKey::__set_rely_cstr(const bool val) { this->rely_cstr = val; } +void SQLForeignKey::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -927,6 +954,14 @@ uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1000,6 +1035,11 @@ uint32_t SQLForeignKey::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 15); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -1021,6 +1061,7 @@ void swap(SQLForeignKey &a, SQLForeignKey &b) { swap(a.enable_cstr, b.enable_cstr); swap(a.validate_cstr, b.validate_cstr); swap(a.rely_cstr, b.rely_cstr); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -1039,6 +1080,7 @@ SQLForeignKey::SQLForeignKey(const SQLForeignKey& other6) { enable_cstr = other6.enable_cstr; validate_cstr = other6.validate_cstr; rely_cstr = other6.rely_cstr; + catName = other6.catName; __isset = other6.__isset; } SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other7) { @@ -1056,6 +1098,7 @@ SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other7) { enable_cstr = other7.enable_cstr; validate_cstr = other7.validate_cstr; rely_cstr = other7.rely_cstr; + catName = other7.catName; __isset = other7.__isset; return *this; } @@ -1076,6 +1119,7 @@ void SQLForeignKey::printTo(std::ostream& out) const { out << ", " << "enable_cstr=" << to_string(enable_cstr); out << ", " << "validate_cstr=" << to_string(validate_cstr); out << ", " << "rely_cstr=" << to_string(rely_cstr); + out << ", " << "catName="; (__isset.catName ? 
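// For SQLPrimaryKey (field id 9) and SQLForeignKey (field id 15) the new
// catName is appended as an optional field: write() only emits it when
// __isset.catName is true, and old readers skip the unknown id, so the change
// is wire-compatible in both directions. Sketch:

static void exampleOptionalCatName() {
  SQLPrimaryKey pk;
  pk.__set_table_db("db");
  pk.__set_table_name("tbl");
  // without the next call, the serialized bytes match the pre-patch layout:
  pk.__set_catName("hive");  // "hive" assumed to be the default catalog name
}
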
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -1084,6 +1128,10 @@ SQLUniqueConstraint::~SQLUniqueConstraint() throw() { } +void SQLUniqueConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLUniqueConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1139,13 +1187,21 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1153,7 +1209,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1161,7 +1217,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_I32) { xfer += iprot->readI32(this->key_seq); this->__isset.key_seq = true; @@ -1169,7 +1225,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->uk_name); this->__isset.uk_name = true; @@ -1177,7 +1233,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1185,7 +1241,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1193,7 +1249,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1218,35 +1274,39 @@ uint32_t SQLUniqueConstraint::write(::apache::thrift::protocol::TProtocol* oprot apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLUniqueConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer 
+= oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32(this->key_seq); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("uk_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeFieldBegin("uk_name", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->uk_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1257,6 +1317,7 @@ uint32_t SQLUniqueConstraint::write(::apache::thrift::protocol::TProtocol* oprot void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1269,6 +1330,7 @@ void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b) { } SQLUniqueConstraint::SQLUniqueConstraint(const SQLUniqueConstraint& other8) { + catName = other8.catName; table_db = other8.table_db; table_name = other8.table_name; column_name = other8.column_name; @@ -1280,6 +1342,7 @@ SQLUniqueConstraint::SQLUniqueConstraint(const SQLUniqueConstraint& other8) { __isset = other8.__isset; } SQLUniqueConstraint& SQLUniqueConstraint::operator=(const SQLUniqueConstraint& other9) { + catName = other9.catName; table_db = other9.table_db; table_name = other9.table_name; column_name = other9.column_name; @@ -1294,7 +1357,8 @@ SQLUniqueConstraint& SQLUniqueConstraint::operator=(const SQLUniqueConstraint& o void SQLUniqueConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLUniqueConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "key_seq=" << to_string(key_seq); @@ -1310,6 +1374,10 @@ SQLNotNullConstraint::~SQLNotNullConstraint() throw() { } +void SQLNotNullConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLNotNullConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1361,13 +1429,21 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } 
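// Unlike the appended fields above, SQLUniqueConstraint (and the constraint
// structs that follow) put catName at field id 1 and shift every existing
// field up by one. Because old field 1 (table_db) and new field 1 (catName)
// are both T_STRING, an old reader paired with a new writer would silently
// deserialize the catalog name into table_db. That is tolerable only because
// these structs travel between peers generated from the same IDL version:

static void exampleShiftedIds() {
  SQLUniqueConstraint uc;
  uc.__set_catName("hive");    // now field 1 ("hive" assumed default)
  uc.__set_table_db("db");     // was field 1, now field 2
  uc.__set_table_name("tbl");  // was field 2, now field 3
}
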
+ break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1375,7 +1451,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1383,7 +1459,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->nn_name); this->__isset.nn_name = true; @@ -1391,7 +1467,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1399,7 +1475,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1407,7 +1483,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1432,31 +1508,35 @@ uint32_t SQLNotNullConstraint::write(::apache::thrift::protocol::TProtocol* opro apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLNotNullConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("nn_name", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("nn_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->nn_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 5); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += 
oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1467,6 +1547,7 @@ uint32_t SQLNotNullConstraint::write(::apache::thrift::protocol::TProtocol* opro void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1478,6 +1559,7 @@ void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b) { } SQLNotNullConstraint::SQLNotNullConstraint(const SQLNotNullConstraint& other10) { + catName = other10.catName; table_db = other10.table_db; table_name = other10.table_name; column_name = other10.column_name; @@ -1488,6 +1570,7 @@ SQLNotNullConstraint::SQLNotNullConstraint(const SQLNotNullConstraint& other10) __isset = other10.__isset; } SQLNotNullConstraint& SQLNotNullConstraint::operator=(const SQLNotNullConstraint& other11) { + catName = other11.catName; table_db = other11.table_db; table_name = other11.table_name; column_name = other11.column_name; @@ -1501,7 +1584,8 @@ SQLNotNullConstraint& SQLNotNullConstraint::operator=(const SQLNotNullConstraint void SQLNotNullConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLNotNullConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "nn_name=" << to_string(nn_name); @@ -1516,6 +1600,10 @@ SQLDefaultConstraint::~SQLDefaultConstraint() throw() { } +void SQLDefaultConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLDefaultConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1571,13 +1659,21 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1585,7 +1681,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1593,7 +1689,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->default_value); this->__isset.default_value = true; @@ -1601,7 +1697,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 5: + case 6: if 
(ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dc_name); this->__isset.dc_name = true; @@ -1609,7 +1705,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1617,7 +1713,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1625,7 +1721,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1650,35 +1746,39 @@ uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* opro apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLDefaultConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->default_value); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->dc_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1689,6 +1789,7 @@ uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* opro void swap(SQLDefaultConstraint 
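// Each read() case above guards on the wire type (T_STRING, T_BOOL, ...) and
// falls through to iprot->skip(ftype) on mismatch, so a shifted id whose type
// changed is dropped rather than mis-parsed; only same-typed collisions
// (string-for-string, as with catName/table_db) can land in the wrong member.
// The same catName-first renumbering is applied verbatim to
// SQLDefaultConstraint here and to SQLCheckConstraint below.
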
&a, SQLDefaultConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1701,6 +1802,7 @@ void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b) { } SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) { + catName = other12.catName; table_db = other12.table_db; table_name = other12.table_name; column_name = other12.column_name; @@ -1712,6 +1814,7 @@ SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) __isset = other12.__isset; } SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint& other13) { + catName = other13.catName; table_db = other13.table_db; table_name = other13.table_name; column_name = other13.column_name; @@ -1726,7 +1829,8 @@ SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint void SQLDefaultConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLDefaultConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "default_value=" << to_string(default_value); @@ -1742,6 +1846,10 @@ SQLCheckConstraint::~SQLCheckConstraint() throw() { } +void SQLCheckConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLCheckConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1797,13 +1905,21 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1811,7 +1927,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1819,7 +1935,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->check_expression); this->__isset.check_expression = true; @@ -1827,7 +1943,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dc_name); this->__isset.dc_name = true; @@ -1835,7 +1951,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1843,7 +1959,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += 
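The hunks above give SQLDefaultConstraint a catName string at field id 1 and shift all of its existing fields up by one (ids 1-8 become 2-9); SQLCheckConstraint gets the identical treatment just below. Renumbering Thrift field ids is not wire-compatible: a pre-patch reader still expects table_db as string field 1 and would deserialize catName into it, so this presumably assumes metastore client and server are upgraded together. A minimal population sketch; the header name, C++ namespace, and all field values are assumptions for illustration, and the __set_* helpers mirror the generated setters visible in this diff:

#include "hive_metastore_types.h"        // generated header (name assumed)
using namespace Apache::Hadoop::Hive;    // cpp namespace (assumed)

// Field-id comments track the renumbering introduced by this patch.
SQLDefaultConstraint makeDefaultConstraint() {
  SQLDefaultConstraint dc;
  dc.__set_catName("hive");        // new field 1
  dc.__set_table_db("default");    // was id 1, now 2
  dc.__set_table_name("t");        // was 2, now 3
  dc.__set_column_name("c");       // was 3, now 4
  dc.__set_default_value("0");     // was 4, now 5
  dc.__set_dc_name("dc_t_c");      // was 5, now 6
  dc.__set_enable_cstr(true);      // was 6, now 7
  dc.__set_validate_cstr(false);   // was 7, now 8
  dc.__set_rely_cstr(false);       // was 8, now 9
  return dc;
}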
iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1851,7 +1967,7 @@ uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1876,35 +1992,39 @@ uint32_t SQLCheckConstraint::write(::apache::thrift::protocol::TProtocol* oprot) apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLCheckConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("check_expression", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("check_expression", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->check_expression); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->dc_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1915,6 +2035,7 @@ uint32_t SQLCheckConstraint::write(::apache::thrift::protocol::TProtocol* oprot) void swap(SQLCheckConstraint &a, SQLCheckConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1927,6 +2048,7 @@ void swap(SQLCheckConstraint &a, SQLCheckConstraint &b) { } SQLCheckConstraint::SQLCheckConstraint(const SQLCheckConstraint& other14) { + catName = other14.catName; table_db = other14.table_db; table_name = other14.table_name; column_name = other14.column_name; @@ -1938,6 +2060,7 @@ 
SQLCheckConstraint::SQLCheckConstraint(const SQLCheckConstraint& other14) { __isset = other14.__isset; } SQLCheckConstraint& SQLCheckConstraint::operator=(const SQLCheckConstraint& other15) { + catName = other15.catName; table_db = other15.table_db; table_name = other15.table_name; column_name = other15.column_name; @@ -1952,7 +2075,8 @@ SQLCheckConstraint& SQLCheckConstraint::operator=(const SQLCheckConstraint& othe void SQLCheckConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLCheckConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "check_expression=" << to_string(check_expression); @@ -2160,6 +2284,11 @@ void HiveObjectRef::__set_columnName(const std::string& val) { this->columnName = val; } +void HiveObjectRef::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -2235,6 +2364,14 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -2280,6 +2417,11 @@ uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeString(this->columnName); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -2292,6 +2434,7 @@ void swap(HiveObjectRef &a, HiveObjectRef &b) { swap(a.objectName, b.objectName); swap(a.partValues, b.partValues); swap(a.columnName, b.columnName); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -2301,6 +2444,7 @@ HiveObjectRef::HiveObjectRef(const HiveObjectRef& other31) { objectName = other31.objectName; partValues = other31.partValues; columnName = other31.columnName; + catName = other31.catName; __isset = other31.__isset; } HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other32) { @@ -2309,6 +2453,7 @@ HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other32) { objectName = other32.objectName; partValues = other32.partValues; columnName = other32.columnName; + catName = other32.catName; __isset = other32.__isset; return *this; } @@ -2320,6 +2465,7 @@ void HiveObjectRef::printTo(std::ostream& out) const { out << ", " << "objectName=" << to_string(objectName); out << ", " << "partValues=" << to_string(partValues); out << ", " << "columnName=" << to_string(columnName); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -4194,7 +4340,586 @@ void GrantRevokeRoleResponse::__set_success(const bool val) { __isset.success = true; } -uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t GrantRevokeRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GrantRevokeRoleResponse"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b) { + using ::std::swap; + swap(a.success, b.success); + swap(a.__isset, b.__isset); +} + +GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other128) { + success = other128.success; + __isset = other128.__isset; +} +GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other129) { + success = other129.success; + __isset = other129.__isset; + return *this; +} +void GrantRevokeRoleResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GrantRevokeRoleResponse("; + out << "success="; (__isset.success ? 
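Unlike the constraint structs above, HiveObjectRef keeps its existing field ids 1-5 untouched and appends catName as an optional field 6; write() only emits the field under the __isset.catName guard, so references that never set a catalog serialize byte-for-byte as before. A usage sketch, using the same assumed header and namespace as the earlier sketch:

HiveObjectRef ref;
ref.__set_dbName("db1");       // existing field, id unchanged
ref.__set_objectName("tbl1");  // existing field, id unchanged
ref.__set_catName("hive");     // new optional field 6; __set_catName also
                               // flips __isset.catName, so write() emits it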
(out << to_string(success)) : (out << "")); + out << ")"; +} + + +Catalog::~Catalog() throw() { +} + + +void Catalog::__set_name(const std::string& val) { + this->name = val; +} + +void Catalog::__set_description(const std::string& val) { + this->description = val; +__isset.description = true; +} + +void Catalog::__set_locationUri(const std::string& val) { + this->locationUri = val; +} + +uint32_t Catalog::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->description); + this->__isset.description = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->locationUri); + this->__isset.locationUri = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t Catalog::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("Catalog"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.description) { + xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->description); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldBegin("locationUri", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->locationUri); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(Catalog &a, Catalog &b) { + using ::std::swap; + swap(a.name, b.name); + swap(a.description, b.description); + swap(a.locationUri, b.locationUri); + swap(a.__isset, b.__isset); +} + +Catalog::Catalog(const Catalog& other130) { + name = other130.name; + description = other130.description; + locationUri = other130.locationUri; + __isset = other130.__isset; +} +Catalog& Catalog::operator=(const Catalog& other131) { + name = other131.name; + description = other131.description; + locationUri = other131.locationUri; + __isset = other131.__isset; + return *this; +} +void Catalog::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "Catalog("; + out << "name=" << to_string(name); + out << ", " << "description="; (__isset.description ? 
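Catalog is the new top-level object introduced by this patch: name (field 1) and locationUri (field 3) are always written, while description (field 2) is optional and guarded by __isset.description, matching the printTo above. A construction sketch with illustrative values:

Catalog cat;
cat.__set_name("test_cat");                        // field 1, always written
cat.__set_description("catalog for unit tests");   // field 2, optional
cat.__set_locationUri("hdfs://nn:8020/cat/test");  // field 3, always written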
(out << to_string(description)) : (out << "")); + out << ", " << "locationUri=" << to_string(locationUri); + out << ")"; +} + + +CreateCatalogRequest::~CreateCatalogRequest() throw() { +} + + +void CreateCatalogRequest::__set_catalog(const Catalog& val) { + this->catalog = val; +} + +uint32_t CreateCatalogRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->catalog.read(iprot); + this->__isset.catalog = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t CreateCatalogRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("CreateCatalogRequest"); + + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catalog.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(CreateCatalogRequest &a, CreateCatalogRequest &b) { + using ::std::swap; + swap(a.catalog, b.catalog); + swap(a.__isset, b.__isset); +} + +CreateCatalogRequest::CreateCatalogRequest(const CreateCatalogRequest& other132) { + catalog = other132.catalog; + __isset = other132.__isset; +} +CreateCatalogRequest& CreateCatalogRequest::operator=(const CreateCatalogRequest& other133) { + catalog = other133.catalog; + __isset = other133.__isset; + return *this; +} +void CreateCatalogRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "CreateCatalogRequest("; + out << "catalog=" << to_string(catalog); + out << ")"; +} + + +GetCatalogRequest::~GetCatalogRequest() throw() { +} + + +void GetCatalogRequest::__set_name(const std::string& val) { + this->name = val; +} + +uint32_t GetCatalogRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t GetCatalogRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetCatalogRequest"); + + xfer += 
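CreateCatalogRequest and GetCatalogRequest (continuing below) are thin single-field wrappers, consistent with the metastore habit of passing request structs so an RPC can later grow optional fields without changing its signature. Building on the Catalog sketch above:

CreateCatalogRequest createReq;
createReq.__set_catalog(cat);    // field 1: the Catalog built earlier

GetCatalogRequest getReq;
getReq.__set_name("test_cat");   // field 1: name of the catalog to fetch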
oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetCatalogRequest &a, GetCatalogRequest &b) { + using ::std::swap; + swap(a.name, b.name); + swap(a.__isset, b.__isset); +} + +GetCatalogRequest::GetCatalogRequest(const GetCatalogRequest& other134) { + name = other134.name; + __isset = other134.__isset; +} +GetCatalogRequest& GetCatalogRequest::operator=(const GetCatalogRequest& other135) { + name = other135.name; + __isset = other135.__isset; + return *this; +} +void GetCatalogRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetCatalogRequest("; + out << "name=" << to_string(name); + out << ")"; +} + + +GetCatalogResponse::~GetCatalogResponse() throw() { +} + + +void GetCatalogResponse::__set_catalog(const Catalog& val) { + this->catalog = val; +} + +uint32_t GetCatalogResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->catalog.read(iprot); + this->__isset.catalog = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t GetCatalogResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetCatalogResponse"); + + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catalog.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetCatalogResponse &a, GetCatalogResponse &b) { + using ::std::swap; + swap(a.catalog, b.catalog); + swap(a.__isset, b.__isset); +} + +GetCatalogResponse::GetCatalogResponse(const GetCatalogResponse& other136) { + catalog = other136.catalog; + __isset = other136.__isset; +} +GetCatalogResponse& GetCatalogResponse::operator=(const GetCatalogResponse& other137) { + catalog = other137.catalog; + __isset = other137.__isset; + return *this; +} +void GetCatalogResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetCatalogResponse("; + out << "catalog=" << to_string(catalog); + out << ")"; +} + + +GetCatalogsResponse::~GetCatalogsResponse() throw() { +} + + +void GetCatalogsResponse::__set_names(const std::vector & val) { + this->names = val; +} + +uint32_t GetCatalogsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += 
iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->names.clear(); + uint32_t _size138; + ::apache::thrift::protocol::TType _etype141; + xfer += iprot->readListBegin(_etype141, _size138); + this->names.resize(_size138); + uint32_t _i142; + for (_i142 = 0; _i142 < _size138; ++_i142) + { + xfer += iprot->readString(this->names[_i142]); + } + xfer += iprot->readListEnd(); + } + this->__isset.names = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t GetCatalogsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetCatalogsResponse"); + + xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); + std::vector ::const_iterator _iter143; + for (_iter143 = this->names.begin(); _iter143 != this->names.end(); ++_iter143) + { + xfer += oprot->writeString((*_iter143)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetCatalogsResponse &a, GetCatalogsResponse &b) { + using ::std::swap; + swap(a.names, b.names); + swap(a.__isset, b.__isset); +} + +GetCatalogsResponse::GetCatalogsResponse(const GetCatalogsResponse& other144) { + names = other144.names; + __isset = other144.__isset; +} +GetCatalogsResponse& GetCatalogsResponse::operator=(const GetCatalogsResponse& other145) { + names = other145.names; + __isset = other145.__isset; + return *this; +} +void GetCatalogsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetCatalogsResponse("; + out << "names=" << to_string(names); + out << ")"; +} + + +DropCatalogRequest::~DropCatalogRequest() throw() { +} + + +void DropCatalogRequest::__set_name(const std::string& val) { + this->name = val; +} + +uint32_t DropCatalogRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -4216,9 +4941,9 @@ uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* ip switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); - this->__isset.success = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; } else { xfer += iprot->skip(ftype); } @@ -4235,40 +4960,39 @@ uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* ip return xfer; } -uint32_t GrantRevokeRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t DropCatalogRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("GrantRevokeRoleResponse"); + xfer += oprot->writeStructBegin("DropCatalogRequest"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += 
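GetCatalogsResponse carries just a list<string> of catalog names; its read() above follows the stock Thrift list pattern (readListBegin, resize, indexed element reads, readListEnd). A consumer sketch, assuming the struct was populated by the corresponding service call or a read() from a protocol:

#include <cstdio>

// Iterator style matches the C++98 idiom of the generated file.
void printCatalogs(const GetCatalogsResponse& resp) {
  std::vector<std::string>::const_iterator it;
  for (it = resp.names.begin(); it != resp.names.end(); ++it) {
    std::printf("catalog: %s\n", it->c_str());
  }
}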
oprot->writeFieldEnd(); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1); - xfer += oprot->writeBool(this->success); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b) { +void swap(DropCatalogRequest &a, DropCatalogRequest &b) { using ::std::swap; - swap(a.success, b.success); + swap(a.name, b.name); swap(a.__isset, b.__isset); } -GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other128) { - success = other128.success; - __isset = other128.__isset; +DropCatalogRequest::DropCatalogRequest(const DropCatalogRequest& other146) { + name = other146.name; + __isset = other146.__isset; } -GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other129) { - success = other129.success; - __isset = other129.__isset; +DropCatalogRequest& DropCatalogRequest::operator=(const DropCatalogRequest& other147) { + name = other147.name; + __isset = other147.__isset; return *this; } -void GrantRevokeRoleResponse::printTo(std::ostream& out) const { +void DropCatalogRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "GrantRevokeRoleResponse("; - out << "success="; (__isset.success ? (out << to_string(success)) : (out << "")); + out << "DropCatalogRequest("; + out << "name=" << to_string(name); out << ")"; } @@ -4308,6 +5032,11 @@ void Database::__set_ownerType(const PrincipalType::type val) { __isset.ownerType = true; } +void Database::__set_catalogName(const std::string& val) { + this->catalogName = val; +__isset.catalogName = true; +} + uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -4357,17 +5086,17 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size130; - ::apache::thrift::protocol::TType _ktype131; - ::apache::thrift::protocol::TType _vtype132; - xfer += iprot->readMapBegin(_ktype131, _vtype132, _size130); - uint32_t _i134; - for (_i134 = 0; _i134 < _size130; ++_i134) + uint32_t _size148; + ::apache::thrift::protocol::TType _ktype149; + ::apache::thrift::protocol::TType _vtype150; + xfer += iprot->readMapBegin(_ktype149, _vtype150, _size148); + uint32_t _i152; + for (_i152 = 0; _i152 < _size148; ++_i152) { - std::string _key135; - xfer += iprot->readString(_key135); - std::string& _val136 = this->parameters[_key135]; - xfer += iprot->readString(_val136); + std::string _key153; + xfer += iprot->readString(_key153); + std::string& _val154 = this->parameters[_key153]; + xfer += iprot->readString(_val154); } xfer += iprot->readMapEnd(); } @@ -4394,14 +5123,22 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast137; - xfer += iprot->readI32(ecast137); - this->ownerType = (PrincipalType::type)ecast137; + int32_t ecast155; + xfer += iprot->readI32(ecast155); + this->ownerType = (PrincipalType::type)ecast155; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); } break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catalogName); + this->__isset.catalogName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += 
iprot->skip(ftype); break; @@ -4434,11 +5171,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter138; - for (_iter138 = this->parameters.begin(); _iter138 != this->parameters.end(); ++_iter138) + std::map<std::string, std::string> ::const_iterator _iter156; + for (_iter156 = this->parameters.begin(); _iter156 != this->parameters.end(); ++_iter156) { - xfer += oprot->writeString(_iter138->first); - xfer += oprot->writeString(_iter138->second); + xfer += oprot->writeString(_iter156->first); + xfer += oprot->writeString(_iter156->second); } xfer += oprot->writeMapEnd(); } @@ -4459,6 +5196,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeI32((int32_t)this->ownerType); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catalogName) { + xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->catalogName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -4473,28 +5215,31 @@ void swap(Database &a, Database &b) { swap(a.privileges, b.privileges); swap(a.ownerName, b.ownerName); swap(a.ownerType, b.ownerType); + swap(a.catalogName, b.catalogName); swap(a.__isset, b.__isset); } -Database::Database(const Database& other139) { - name = other139.name; - description = other139.description; - locationUri = other139.locationUri; - parameters = other139.parameters; - privileges = other139.privileges; - ownerName = other139.ownerName; - ownerType = other139.ownerType; - __isset = other139.__isset; -} -Database& Database::operator=(const Database& other140) { - name = other140.name; - description = other140.description; - locationUri = other140.locationUri; - parameters = other140.parameters; - privileges = other140.privileges; - ownerName = other140.ownerName; - ownerType = other140.ownerType; - __isset = other140.__isset; +Database::Database(const Database& other157) { + name = other157.name; + description = other157.description; + locationUri = other157.locationUri; + parameters = other157.parameters; + privileges = other157.privileges; + ownerName = other157.ownerName; + ownerType = other157.ownerType; + catalogName = other157.catalogName; + __isset = other157.__isset; +} +Database& Database::operator=(const Database& other158) { + name = other158.name; + description = other158.description; + locationUri = other158.locationUri; + parameters = other158.parameters; + privileges = other158.privileges; + ownerName = other158.ownerName; + ownerType = other158.ownerType; + catalogName = other158.catalogName; + __isset = other158.__isset; return *this; } void Database::printTo(std::ostream& out) const { @@ -4507,6 +5252,7 @@ void Database::printTo(std::ostream& out) const { out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "ownerName="; (__isset.ownerName ? (out << to_string(ownerName)) : (out << "")); out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "")); + out << ", " << "catalogName="; (__isset.catalogName ? 
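Database takes the append-only route: catalogName arrives as optional field 8 (read in the new case 8 above, written only under the __isset.catalogName guard), so databases that never set a catalog keep their old wire format. Sketch with illustrative values:

Database db;
db.__set_name("web_logs");
db.__set_locationUri("hdfs://nn:8020/warehouse/web_logs.db");
db.__set_catalogName("hive");   // new optional field 8; omitted from the
                                // wire while unset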
(out << to_string(catalogName)) : (out << "")); out << ")"; } @@ -4588,17 +5334,17 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size141; - ::apache::thrift::protocol::TType _ktype142; - ::apache::thrift::protocol::TType _vtype143; - xfer += iprot->readMapBegin(_ktype142, _vtype143, _size141); - uint32_t _i145; - for (_i145 = 0; _i145 < _size141; ++_i145) + uint32_t _size159; + ::apache::thrift::protocol::TType _ktype160; + ::apache::thrift::protocol::TType _vtype161; + xfer += iprot->readMapBegin(_ktype160, _vtype161, _size159); + uint32_t _i163; + for (_i163 = 0; _i163 < _size159; ++_i163) { - std::string _key146; - xfer += iprot->readString(_key146); - std::string& _val147 = this->parameters[_key146]; - xfer += iprot->readString(_val147); + std::string _key164; + xfer += iprot->readString(_key164); + std::string& _val165 = this->parameters[_key164]; + xfer += iprot->readString(_val165); } xfer += iprot->readMapEnd(); } @@ -4633,9 +5379,9 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast148; - xfer += iprot->readI32(ecast148); - this->serdeType = (SerdeType::type)ecast148; + int32_t ecast166; + xfer += iprot->readI32(ecast166); + this->serdeType = (SerdeType::type)ecast166; this->__isset.serdeType = true; } else { xfer += iprot->skip(ftype); @@ -4669,11 +5415,11 @@ uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter149; - for (_iter149 = this->parameters.begin(); _iter149 != this->parameters.end(); ++_iter149) + std::map<std::string, std::string> ::const_iterator _iter167; + for (_iter167 = this->parameters.begin(); _iter167 != this->parameters.end(); ++_iter167) { - xfer += oprot->writeString(_iter149->first); - xfer += oprot->writeString(_iter149->second); + xfer += oprot->writeString(_iter167->first); + xfer += oprot->writeString(_iter167->second); } xfer += oprot->writeMapEnd(); } @@ -4716,25 +5462,25 @@ void swap(SerDeInfo &a, SerDeInfo &b) { swap(a.__isset, b.__isset); } -SerDeInfo::SerDeInfo(const SerDeInfo& other150) { - name = other150.name; - serializationLib = other150.serializationLib; - parameters = other150.parameters; - description = other150.description; - serializerClass = other150.serializerClass; - deserializerClass = other150.deserializerClass; - serdeType = other150.serdeType; - __isset = other150.__isset; -} -SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other151) { - name = other151.name; - serializationLib = other151.serializationLib; - parameters = other151.parameters; - description = other151.description; - serializerClass = other151.serializerClass; - deserializerClass = other151.deserializerClass; - serdeType = other151.serdeType; - __isset = other151.__isset; +SerDeInfo::SerDeInfo(const SerDeInfo& other168) { + name = other168.name; + serializationLib = other168.serializationLib; + parameters = other168.parameters; + description = other168.description; + serializerClass = other168.serializerClass; + deserializerClass = other168.deserializerClass; + serdeType = other168.serdeType; + __isset = other168.__isset; +} +SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other169) { 
+ name = other169.name; + serializationLib = other169.serializationLib; + parameters = other169.parameters; + description = other169.description; + serializerClass = other169.serializerClass; + deserializerClass = other169.deserializerClass; + serdeType = other169.serdeType; + __isset = other169.__isset; return *this; } void SerDeInfo::printTo(std::ostream& out) const { @@ -4837,15 +5583,15 @@ void swap(Order &a, Order &b) { swap(a.__isset, b.__isset); } -Order::Order(const Order& other152) { - col = other152.col; - order = other152.order; - __isset = other152.__isset; +Order::Order(const Order& other170) { + col = other170.col; + order = other170.order; + __isset = other170.__isset; } -Order& Order::operator=(const Order& other153) { - col = other153.col; - order = other153.order; - __isset = other153.__isset; +Order& Order::operator=(const Order& other171) { + col = other171.col; + order = other171.order; + __isset = other171.__isset; return *this; } void Order::printTo(std::ostream& out) const { @@ -4898,14 +5644,14 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColNames.clear(); - uint32_t _size154; - ::apache::thrift::protocol::TType _etype157; - xfer += iprot->readListBegin(_etype157, _size154); - this->skewedColNames.resize(_size154); - uint32_t _i158; - for (_i158 = 0; _i158 < _size154; ++_i158) + uint32_t _size172; + ::apache::thrift::protocol::TType _etype175; + xfer += iprot->readListBegin(_etype175, _size172); + this->skewedColNames.resize(_size172); + uint32_t _i176; + for (_i176 = 0; _i176 < _size172; ++_i176) { - xfer += iprot->readString(this->skewedColNames[_i158]); + xfer += iprot->readString(this->skewedColNames[_i176]); } xfer += iprot->readListEnd(); } @@ -4918,23 +5664,23 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColValues.clear(); - uint32_t _size159; - ::apache::thrift::protocol::TType _etype162; - xfer += iprot->readListBegin(_etype162, _size159); - this->skewedColValues.resize(_size159); - uint32_t _i163; - for (_i163 = 0; _i163 < _size159; ++_i163) + uint32_t _size177; + ::apache::thrift::protocol::TType _etype180; + xfer += iprot->readListBegin(_etype180, _size177); + this->skewedColValues.resize(_size177); + uint32_t _i181; + for (_i181 = 0; _i181 < _size177; ++_i181) { { - this->skewedColValues[_i163].clear(); - uint32_t _size164; - ::apache::thrift::protocol::TType _etype167; - xfer += iprot->readListBegin(_etype167, _size164); - this->skewedColValues[_i163].resize(_size164); - uint32_t _i168; - for (_i168 = 0; _i168 < _size164; ++_i168) + this->skewedColValues[_i181].clear(); + uint32_t _size182; + ::apache::thrift::protocol::TType _etype185; + xfer += iprot->readListBegin(_etype185, _size182); + this->skewedColValues[_i181].resize(_size182); + uint32_t _i186; + for (_i186 = 0; _i186 < _size182; ++_i186) { - xfer += iprot->readString(this->skewedColValues[_i163][_i168]); + xfer += iprot->readString(this->skewedColValues[_i181][_i186]); } xfer += iprot->readListEnd(); } @@ -4950,29 +5696,29 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->skewedColValueLocationMaps.clear(); - uint32_t _size169; - ::apache::thrift::protocol::TType _ktype170; - ::apache::thrift::protocol::TType _vtype171; - xfer += iprot->readMapBegin(_ktype170, _vtype171, _size169); - uint32_t _i173; - for (_i173 = 0; 
_i173 < _size169; ++_i173) + uint32_t _size187; + ::apache::thrift::protocol::TType _ktype188; + ::apache::thrift::protocol::TType _vtype189; + xfer += iprot->readMapBegin(_ktype188, _vtype189, _size187); + uint32_t _i191; + for (_i191 = 0; _i191 < _size187; ++_i191) { - std::vector<std::string> _key174; + std::vector<std::string> _key192; { - _key174.clear(); - uint32_t _size176; - ::apache::thrift::protocol::TType _etype179; - xfer += iprot->readListBegin(_etype179, _size176); - _key174.resize(_size176); - uint32_t _i180; - for (_i180 = 0; _i180 < _size176; ++_i180) + _key192.clear(); + uint32_t _size194; + ::apache::thrift::protocol::TType _etype197; + xfer += iprot->readListBegin(_etype197, _size194); + _key192.resize(_size194); + uint32_t _i198; + for (_i198 = 0; _i198 < _size194; ++_i198) { - xfer += iprot->readString(_key174[_i180]); + xfer += iprot->readString(_key192[_i198]); } xfer += iprot->readListEnd(); } - std::string& _val175 = this->skewedColValueLocationMaps[_key174]; - xfer += iprot->readString(_val175); + std::string& _val193 = this->skewedColValueLocationMaps[_key192]; + xfer += iprot->readString(_val193); } xfer += iprot->readMapEnd(); } @@ -5001,10 +5747,10 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColNames.size())); - std::vector<std::string> ::const_iterator _iter181; - for (_iter181 = this->skewedColNames.begin(); _iter181 != this->skewedColNames.end(); ++_iter181) + std::vector<std::string> ::const_iterator _iter199; + for (_iter199 = this->skewedColNames.begin(); _iter199 != this->skewedColNames.end(); ++_iter199) { - xfer += oprot->writeString((*_iter181)); + xfer += oprot->writeString((*_iter199)); } xfer += oprot->writeListEnd(); } @@ -5013,15 +5759,15 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->skewedColValues.size())); - std::vector<std::vector<std::string> > ::const_iterator _iter182; - for (_iter182 = this->skewedColValues.begin(); _iter182 != this->skewedColValues.end(); ++_iter182) + std::vector<std::vector<std::string> > ::const_iterator _iter200; + for (_iter200 = this->skewedColValues.begin(); _iter200 != this->skewedColValues.end(); ++_iter200) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter182).size())); - std::vector<std::string> ::const_iterator _iter183; - for (_iter183 = (*_iter182).begin(); _iter183 != (*_iter182).end(); ++_iter183) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter200).size())); + std::vector<std::string> ::const_iterator _iter201; + for (_iter201 = (*_iter200).begin(); _iter201 != (*_iter200).end(); ++_iter201) { - xfer += oprot->writeString((*_iter183)); + xfer += oprot->writeString((*_iter201)); } xfer += oprot->writeListEnd(); } @@ -5033,19 +5779,19 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColValueLocationMaps.size())); - std::map<std::vector<std::string>, std::string> ::const_iterator _iter184; - for (_iter184 = this->skewedColValueLocationMaps.begin(); _iter184 != 
this->skewedColValueLocationMaps.end(); ++_iter184) + std::map<std::vector<std::string>, std::string> ::const_iterator _iter202; + for (_iter202 = this->skewedColValueLocationMaps.begin(); _iter202 != this->skewedColValueLocationMaps.end(); ++_iter202) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter184->first.size())); - std::vector<std::string> ::const_iterator _iter185; - for (_iter185 = _iter184->first.begin(); _iter185 != _iter184->first.end(); ++_iter185) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter202->first.size())); + std::vector<std::string> ::const_iterator _iter203; + for (_iter203 = _iter202->first.begin(); _iter203 != _iter202->first.end(); ++_iter203) { - xfer += oprot->writeString((*_iter185)); + xfer += oprot->writeString((*_iter203)); } xfer += oprot->writeListEnd(); } - xfer += oprot->writeString(_iter184->second); + xfer += oprot->writeString(_iter202->second); } xfer += oprot->writeMapEnd(); } @@ -5064,17 +5810,17 @@ void swap(SkewedInfo &a, SkewedInfo &b) { swap(a.__isset, b.__isset); } -SkewedInfo::SkewedInfo(const SkewedInfo& other186) { - skewedColNames = other186.skewedColNames; - skewedColValues = other186.skewedColValues; - skewedColValueLocationMaps = other186.skewedColValueLocationMaps; - __isset = other186.__isset; +SkewedInfo::SkewedInfo(const SkewedInfo& other204) { + skewedColNames = other204.skewedColNames; + skewedColValues = other204.skewedColValues; + skewedColValueLocationMaps = other204.skewedColValueLocationMaps; + __isset = other204.__isset; } -SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other187) { - skewedColNames = other187.skewedColNames; - skewedColValues = other187.skewedColValues; - skewedColValueLocationMaps = other187.skewedColValueLocationMaps; - __isset = other187.__isset; +SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other205) { + skewedColNames = other205.skewedColNames; + skewedColValues = other205.skewedColValues; + skewedColValueLocationMaps = other205.skewedColValueLocationMaps; + __isset = other205.__isset; return *this; } void SkewedInfo::printTo(std::ostream& out) const { @@ -5166,14 +5912,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size188; - ::apache::thrift::protocol::TType _etype191; - xfer += iprot->readListBegin(_etype191, _size188); - this->cols.resize(_size188); - uint32_t _i192; - for (_i192 = 0; _i192 < _size188; ++_i192) + uint32_t _size206; + ::apache::thrift::protocol::TType _etype209; + xfer += iprot->readListBegin(_etype209, _size206); + this->cols.resize(_size206); + uint32_t _i210; + for (_i210 = 0; _i210 < _size206; ++_i210) { - xfer += this->cols[_i192].read(iprot); + xfer += this->cols[_i210].read(iprot); } xfer += iprot->readListEnd(); } @@ -5234,14 +5980,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->bucketCols.clear(); - uint32_t _size193; - ::apache::thrift::protocol::TType _etype196; - xfer += iprot->readListBegin(_etype196, _size193); - this->bucketCols.resize(_size193); - uint32_t _i197; - for (_i197 = 0; _i197 < _size193; ++_i197) + uint32_t _size211; + ::apache::thrift::protocol::TType _etype214; + xfer += iprot->readListBegin(_etype214, _size211); + this->bucketCols.resize(_size211); + uint32_t _i215; + for (_i215 = 0; _i215 < _size211; ++_i215) { - xfer += iprot->readString(this->bucketCols[_i197]); + xfer += 
iprot->readString(this->bucketCols[_i215]); } xfer += iprot->readListEnd(); } @@ -5254,14 +6000,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->sortCols.clear(); - uint32_t _size198; - ::apache::thrift::protocol::TType _etype201; - xfer += iprot->readListBegin(_etype201, _size198); - this->sortCols.resize(_size198); - uint32_t _i202; - for (_i202 = 0; _i202 < _size198; ++_i202) + uint32_t _size216; + ::apache::thrift::protocol::TType _etype219; + xfer += iprot->readListBegin(_etype219, _size216); + this->sortCols.resize(_size216); + uint32_t _i220; + for (_i220 = 0; _i220 < _size216; ++_i220) { - xfer += this->sortCols[_i202].read(iprot); + xfer += this->sortCols[_i220].read(iprot); } xfer += iprot->readListEnd(); } @@ -5274,17 +6020,17 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size203; - ::apache::thrift::protocol::TType _ktype204; - ::apache::thrift::protocol::TType _vtype205; - xfer += iprot->readMapBegin(_ktype204, _vtype205, _size203); - uint32_t _i207; - for (_i207 = 0; _i207 < _size203; ++_i207) + uint32_t _size221; + ::apache::thrift::protocol::TType _ktype222; + ::apache::thrift::protocol::TType _vtype223; + xfer += iprot->readMapBegin(_ktype222, _vtype223, _size221); + uint32_t _i225; + for (_i225 = 0; _i225 < _size221; ++_i225) { - std::string _key208; - xfer += iprot->readString(_key208); - std::string& _val209 = this->parameters[_key208]; - xfer += iprot->readString(_val209); + std::string _key226; + xfer += iprot->readString(_key226); + std::string& _val227 = this->parameters[_key226]; + xfer += iprot->readString(_val227); } xfer += iprot->readMapEnd(); } @@ -5329,10 +6075,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size())); - std::vector<FieldSchema> ::const_iterator _iter210; - for (_iter210 = this->cols.begin(); _iter210 != this->cols.end(); ++_iter210) + std::vector<FieldSchema> ::const_iterator _iter228; + for (_iter228 = this->cols.begin(); _iter228 != this->cols.end(); ++_iter228) { - xfer += (*_iter210).write(oprot); + xfer += (*_iter228).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5365,10 +6111,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->bucketCols.size())); - std::vector<std::string> ::const_iterator _iter211; - for (_iter211 = this->bucketCols.begin(); _iter211 != this->bucketCols.end(); ++_iter211) + std::vector<std::string> ::const_iterator _iter229; + for (_iter229 = this->bucketCols.begin(); _iter229 != this->bucketCols.end(); ++_iter229) { - xfer += oprot->writeString((*_iter211)); + xfer += oprot->writeString((*_iter229)); } xfer += oprot->writeListEnd(); } @@ -5377,10 +6123,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->sortCols.size())); - std::vector<Order> ::const_iterator _iter212; - for (_iter212 = this->sortCols.begin(); _iter212 != 
this->sortCols.end(); ++_iter212) + std::vector<Order> ::const_iterator _iter230; + for (_iter230 = this->sortCols.begin(); _iter230 != this->sortCols.end(); ++_iter230) { - xfer += (*_iter212).write(oprot); + xfer += (*_iter230).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5389,11 +6135,11 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter213; - for (_iter213 = this->parameters.begin(); _iter213 != this->parameters.end(); ++_iter213) + std::map<std::string, std::string> ::const_iterator _iter231; + for (_iter231 = this->parameters.begin(); _iter231 != this->parameters.end(); ++_iter231) { - xfer += oprot->writeString(_iter213->first); - xfer += oprot->writeString(_iter213->second); + xfer += oprot->writeString(_iter231->first); + xfer += oprot->writeString(_iter231->second); } xfer += oprot->writeMapEnd(); } @@ -5431,35 +6177,35 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.__isset, b.__isset); } -StorageDescriptor::StorageDescriptor(const StorageDescriptor& other214) { - cols = other214.cols; - location = other214.location; - inputFormat = other214.inputFormat; - outputFormat = other214.outputFormat; - compressed = other214.compressed; - numBuckets = other214.numBuckets; - serdeInfo = other214.serdeInfo; - bucketCols = other214.bucketCols; - sortCols = other214.sortCols; - parameters = other214.parameters; - skewedInfo = other214.skewedInfo; - storedAsSubDirectories = other214.storedAsSubDirectories; - __isset = other214.__isset; -} -StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other215) { - cols = other215.cols; - location = other215.location; - inputFormat = other215.inputFormat; - outputFormat = other215.outputFormat; - compressed = other215.compressed; - numBuckets = other215.numBuckets; - serdeInfo = other215.serdeInfo; - bucketCols = other215.bucketCols; - sortCols = other215.sortCols; - parameters = other215.parameters; - skewedInfo = other215.skewedInfo; - storedAsSubDirectories = other215.storedAsSubDirectories; - __isset = other215.__isset; +StorageDescriptor::StorageDescriptor(const StorageDescriptor& other232) { + cols = other232.cols; + location = other232.location; + inputFormat = other232.inputFormat; + outputFormat = other232.outputFormat; + compressed = other232.compressed; + numBuckets = other232.numBuckets; + serdeInfo = other232.serdeInfo; + bucketCols = other232.bucketCols; + sortCols = other232.sortCols; + parameters = other232.parameters; + skewedInfo = other232.skewedInfo; + storedAsSubDirectories = other232.storedAsSubDirectories; + __isset = other232.__isset; +} +StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other233) { + cols = other233.cols; + location = other233.location; + inputFormat = other233.inputFormat; + outputFormat = other233.outputFormat; + compressed = other233.compressed; + numBuckets = other233.numBuckets; + serdeInfo = other233.serdeInfo; + bucketCols = other233.bucketCols; + sortCols = other233.sortCols; + parameters = other233.parameters; + skewedInfo = other233.skewedInfo; + storedAsSubDirectories = other233.storedAsSubDirectories; + __isset = other233.__isset; return *this; } void StorageDescriptor::printTo(std::ostream& out) const { @@ -5553,6 +6299,11 @@ void Table::__set_creationMetadata(const 
CreationMetadata& val) { __isset.creationMetadata = true; } +void Table::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -5634,14 +6385,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size216; - ::apache::thrift::protocol::TType _etype219; - xfer += iprot->readListBegin(_etype219, _size216); - this->partitionKeys.resize(_size216); - uint32_t _i220; - for (_i220 = 0; _i220 < _size216; ++_i220) + uint32_t _size234; + ::apache::thrift::protocol::TType _etype237; + xfer += iprot->readListBegin(_etype237, _size234); + this->partitionKeys.resize(_size234); + uint32_t _i238; + for (_i238 = 0; _i238 < _size234; ++_i238) { - xfer += this->partitionKeys[_i220].read(iprot); + xfer += this->partitionKeys[_i238].read(iprot); } xfer += iprot->readListEnd(); } @@ -5654,17 +6405,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size221; - ::apache::thrift::protocol::TType _ktype222; - ::apache::thrift::protocol::TType _vtype223; - xfer += iprot->readMapBegin(_ktype222, _vtype223, _size221); - uint32_t _i225; - for (_i225 = 0; _i225 < _size221; ++_i225) + uint32_t _size239; + ::apache::thrift::protocol::TType _ktype240; + ::apache::thrift::protocol::TType _vtype241; + xfer += iprot->readMapBegin(_ktype240, _vtype241, _size239); + uint32_t _i243; + for (_i243 = 0; _i243 < _size239; ++_i243) { - std::string _key226; - xfer += iprot->readString(_key226); - std::string& _val227 = this->parameters[_key226]; - xfer += iprot->readString(_val227); + std::string _key244; + xfer += iprot->readString(_key244); + std::string& _val245 = this->parameters[_key244]; + xfer += iprot->readString(_val245); } xfer += iprot->readMapEnd(); } @@ -5729,6 +6480,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 17: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -5777,10 +6536,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size())); - std::vector<FieldSchema> ::const_iterator _iter228; - for (_iter228 = this->partitionKeys.begin(); _iter228 != this->partitionKeys.end(); ++_iter228) + std::vector<FieldSchema> ::const_iterator _iter246; + for (_iter246 = this->partitionKeys.begin(); _iter246 != this->partitionKeys.end(); ++_iter246) { - xfer += (*_iter228).write(oprot); + xfer += (*_iter246).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5789,11 +6548,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter229; - for (_iter229 = this->parameters.begin(); _iter229 != this->parameters.end(); 
++_iter229) + std::map<std::string, std::string> ::const_iterator _iter247; + for (_iter247 = this->parameters.begin(); _iter247 != this->parameters.end(); ++_iter247) { - xfer += oprot->writeString(_iter229->first); - xfer += oprot->writeString(_iter229->second); + xfer += oprot->writeString(_iter247->first); + xfer += oprot->writeString(_iter247->second); } xfer += oprot->writeMapEnd(); } @@ -5831,6 +6590,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += this->creationMetadata.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 17); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -5854,46 +6618,49 @@ void swap(Table &a, Table &b) { swap(a.temporary, b.temporary); swap(a.rewriteEnabled, b.rewriteEnabled); swap(a.creationMetadata, b.creationMetadata); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Table::Table(const Table& other230) { - tableName = other230.tableName; - dbName = other230.dbName; - owner = other230.owner; - createTime = other230.createTime; - lastAccessTime = other230.lastAccessTime; - retention = other230.retention; - sd = other230.sd; - partitionKeys = other230.partitionKeys; - parameters = other230.parameters; - viewOriginalText = other230.viewOriginalText; - viewExpandedText = other230.viewExpandedText; - tableType = other230.tableType; - privileges = other230.privileges; - temporary = other230.temporary; - rewriteEnabled = other230.rewriteEnabled; - creationMetadata = other230.creationMetadata; - __isset = other230.__isset; -} -Table& Table::operator=(const Table& other231) { - tableName = other231.tableName; - dbName = other231.dbName; - owner = other231.owner; - createTime = other231.createTime; - lastAccessTime = other231.lastAccessTime; - retention = other231.retention; - sd = other231.sd; - partitionKeys = other231.partitionKeys; - parameters = other231.parameters; - viewOriginalText = other231.viewOriginalText; - viewExpandedText = other231.viewExpandedText; - tableType = other231.tableType; - privileges = other231.privileges; - temporary = other231.temporary; - rewriteEnabled = other231.rewriteEnabled; - creationMetadata = other231.creationMetadata; - __isset = other231.__isset; +Table::Table(const Table& other248) { + tableName = other248.tableName; + dbName = other248.dbName; + owner = other248.owner; + createTime = other248.createTime; + lastAccessTime = other248.lastAccessTime; + retention = other248.retention; + sd = other248.sd; + partitionKeys = other248.partitionKeys; + parameters = other248.parameters; + viewOriginalText = other248.viewOriginalText; + viewExpandedText = other248.viewExpandedText; + tableType = other248.tableType; + privileges = other248.privileges; + temporary = other248.temporary; + rewriteEnabled = other248.rewriteEnabled; + creationMetadata = other248.creationMetadata; + catName = other248.catName; + __isset = other248.__isset; +} +Table& Table::operator=(const Table& other249) { + tableName = other249.tableName; + dbName = other249.dbName; + owner = other249.owner; + createTime = other249.createTime; + lastAccessTime = other249.lastAccessTime; + retention = other249.retention; + sd = other249.sd; + partitionKeys = other249.partitionKeys; + parameters = other249.parameters; + viewOriginalText = other249.viewOriginalText; + viewExpandedText = other249.viewExpandedText; + tableType = 
other249.tableType; + privileges = other249.privileges; + temporary = other249.temporary; + rewriteEnabled = other249.rewriteEnabled; + creationMetadata = other249.creationMetadata; + catName = other249.catName; + __isset = other249.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -5915,6 +6682,7 @@ void Table::printTo(std::ostream& out) const { out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); out << ", " << "rewriteEnabled="; (__isset.rewriteEnabled ? (out << to_string(rewriteEnabled)) : (out << "")); out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -5956,6 +6724,11 @@ void Partition::__set_privileges(const PrincipalPrivilegeSet& val) { __isset.privileges = true; } +void Partition::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -5981,14 +6754,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size232; - ::apache::thrift::protocol::TType _etype235; - xfer += iprot->readListBegin(_etype235, _size232); - this->values.resize(_size232); - uint32_t _i236; - for (_i236 = 0; _i236 < _size232; ++_i236) + uint32_t _size250; + ::apache::thrift::protocol::TType _etype253; + xfer += iprot->readListBegin(_etype253, _size250); + this->values.resize(_size250); + uint32_t _i254; + for (_i254 = 0; _i254 < _size250; ++_i254) { - xfer += iprot->readString(this->values[_i236]); + xfer += iprot->readString(this->values[_i254]); } xfer += iprot->readListEnd(); } @@ -6041,17 +6814,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size237; - ::apache::thrift::protocol::TType _ktype238; - ::apache::thrift::protocol::TType _vtype239; - xfer += iprot->readMapBegin(_ktype238, _vtype239, _size237); - uint32_t _i241; - for (_i241 = 0; _i241 < _size237; ++_i241) + uint32_t _size255; + ::apache::thrift::protocol::TType _ktype256; + ::apache::thrift::protocol::TType _vtype257; + xfer += iprot->readMapBegin(_ktype256, _vtype257, _size255); + uint32_t _i259; + for (_i259 = 0; _i259 < _size255; ++_i259) { - std::string _key242; - xfer += iprot->readString(_key242); - std::string& _val243 = this->parameters[_key242]; - xfer += iprot->readString(_val243); + std::string _key260; + xfer += iprot->readString(_key260); + std::string& _val261 = this->parameters[_key260]; + xfer += iprot->readString(_val261); } xfer += iprot->readMapEnd(); } @@ -6068,6 +6841,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6088,10 +6869,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->values.size())); - std::vector<std::string> ::const_iterator _iter244; - for (_iter244 = this->values.begin(); _iter244 != this->values.end(); ++_iter244) + std::vector<std::string> ::const_iterator _iter262; + for (_iter262 = this->values.begin(); _iter262 != this->values.end(); ++_iter262) { - xfer += oprot->writeString((*_iter244)); + xfer += oprot->writeString((*_iter262)); } xfer += oprot->writeListEnd(); } @@ -6120,11 +6901,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter245; - for (_iter245 = this->parameters.begin(); _iter245 != this->parameters.end(); ++_iter245) + std::map<std::string, std::string> ::const_iterator _iter263; + for (_iter263 = this->parameters.begin(); _iter263 != this->parameters.end(); ++_iter263) { - xfer += oprot->writeString(_iter245->first); - xfer += oprot->writeString(_iter245->second); + xfer += oprot->writeString(_iter263->first); + xfer += oprot->writeString(_iter263->second); } xfer += oprot->writeMapEnd(); } @@ -6135,6 +6916,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6150,30 +6936,33 @@ void swap(Partition &a, Partition &b) { swap(a.sd, b.sd); swap(a.parameters, b.parameters); swap(a.privileges, b.privileges); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other246) { - values = other246.values; - dbName = other246.dbName; - tableName = other246.tableName; - createTime = other246.createTime; - lastAccessTime = other246.lastAccessTime; - sd = other246.sd; - parameters = other246.parameters; - privileges = other246.privileges; - __isset = other246.__isset; -} -Partition& Partition::operator=(const Partition& other247) { - values = other247.values; - dbName = other247.dbName; - tableName = other247.tableName; - createTime = other247.createTime; - lastAccessTime = other247.lastAccessTime; - sd = other247.sd; - parameters = other247.parameters; - privileges = other247.privileges; - __isset = other247.__isset; +Partition::Partition(const Partition& other264) { + values = other264.values; + dbName = other264.dbName; + tableName = other264.tableName; + createTime = other264.createTime; + lastAccessTime = other264.lastAccessTime; + sd = other264.sd; + parameters = other264.parameters; + privileges = other264.privileges; + catName = other264.catName; + __isset = other264.__isset; +} +Partition& Partition::operator=(const Partition& other265) { + values = other265.values; + dbName = other265.dbName; + tableName = other265.tableName; + createTime = other265.createTime; + lastAccessTime = other265.lastAccessTime; + sd = other265.sd; + parameters = other265.parameters; + privileges = other265.privileges; + catName = other265.catName; + __isset = other265.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -6187,6 +6976,7 @@ void Partition::printTo(std::ostream& out) const { out << ", " << "sd=" << to_string(sd); out << ", " <<
"parameters=" << to_string(parameters); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -6245,14 +7035,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size248; - ::apache::thrift::protocol::TType _etype251; - xfer += iprot->readListBegin(_etype251, _size248); - this->values.resize(_size248); - uint32_t _i252; - for (_i252 = 0; _i252 < _size248; ++_i252) + uint32_t _size266; + ::apache::thrift::protocol::TType _etype269; + xfer += iprot->readListBegin(_etype269, _size266); + this->values.resize(_size266); + uint32_t _i270; + for (_i270 = 0; _i270 < _size266; ++_i270) { - xfer += iprot->readString(this->values[_i252]); + xfer += iprot->readString(this->values[_i270]); } xfer += iprot->readListEnd(); } @@ -6289,17 +7079,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size253; - ::apache::thrift::protocol::TType _ktype254; - ::apache::thrift::protocol::TType _vtype255; - xfer += iprot->readMapBegin(_ktype254, _vtype255, _size253); - uint32_t _i257; - for (_i257 = 0; _i257 < _size253; ++_i257) + uint32_t _size271; + ::apache::thrift::protocol::TType _ktype272; + ::apache::thrift::protocol::TType _vtype273; + xfer += iprot->readMapBegin(_ktype272, _vtype273, _size271); + uint32_t _i275; + for (_i275 = 0; _i275 < _size271; ++_i275) { - std::string _key258; - xfer += iprot->readString(_key258); - std::string& _val259 = this->parameters[_key258]; - xfer += iprot->readString(_val259); + std::string _key276; + xfer += iprot->readString(_key276); + std::string& _val277 = this->parameters[_key276]; + xfer += iprot->readString(_val277); } xfer += iprot->readMapEnd(); } @@ -6336,10 +7126,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size())); - std::vector<std::string> ::const_iterator _iter260; - for (_iter260 = this->values.begin(); _iter260 != this->values.end(); ++_iter260) + std::vector<std::string> ::const_iterator _iter278; + for (_iter278 = this->values.begin(); _iter278 != this->values.end(); ++_iter278) { - xfer += oprot->writeString((*_iter260)); + xfer += oprot->writeString((*_iter278)); } xfer += oprot->writeListEnd(); } @@ -6360,11 +7150,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size())); - std::map<std::string, std::string> ::const_iterator _iter261; - for (_iter261 = this->parameters.begin(); _iter261 != this->parameters.end(); ++_iter261) + std::map<std::string, std::string> ::const_iterator _iter279; + for (_iter279 = this->parameters.begin(); _iter279 != this->parameters.end(); ++_iter279) { - xfer += oprot->writeString(_iter261->first); - xfer += oprot->writeString(_iter261->second); + xfer += oprot->writeString(_iter279->first); + xfer += oprot->writeString(_iter279->second); } xfer += oprot->writeMapEnd(); } @@ -6391,23 +7181,23 @@ void swap(PartitionWithoutSD &a,
PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other262) { - values = other262.values; - createTime = other262.createTime; - lastAccessTime = other262.lastAccessTime; - relativePath = other262.relativePath; - parameters = other262.parameters; - privileges = other262.privileges; - __isset = other262.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other263) { - values = other263.values; - createTime = other263.createTime; - lastAccessTime = other263.lastAccessTime; - relativePath = other263.relativePath; - parameters = other263.parameters; - privileges = other263.privileges; - __isset = other263.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other280) { + values = other280.values; + createTime = other280.createTime; + lastAccessTime = other280.lastAccessTime; + relativePath = other280.relativePath; + parameters = other280.parameters; + privileges = other280.privileges; + __isset = other280.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other281) { + values = other281.values; + createTime = other281.createTime; + lastAccessTime = other281.lastAccessTime; + relativePath = other281.relativePath; + parameters = other281.parameters; + privileges = other281.privileges; + __isset = other281.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -6460,14 +7250,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size264; - ::apache::thrift::protocol::TType _etype267; - xfer += iprot->readListBegin(_etype267, _size264); - this->partitions.resize(_size264); - uint32_t _i268; - for (_i268 = 0; _i268 < _size264; ++_i268) + uint32_t _size282; + ::apache::thrift::protocol::TType _etype285; + xfer += iprot->readListBegin(_etype285, _size282); + this->partitions.resize(_size282); + uint32_t _i286; + for (_i286 = 0; _i286 < _size282; ++_i286) { - xfer += this->partitions[_i268].read(iprot); + xfer += this->partitions[_i286].read(iprot); } xfer += iprot->readListEnd(); } @@ -6504,10 +7294,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); - std::vector<PartitionWithoutSD> ::const_iterator _iter269; - for (_iter269 = this->partitions.begin(); _iter269 != this->partitions.end(); ++_iter269) + std::vector<PartitionWithoutSD> ::const_iterator _iter287; + for (_iter287 = this->partitions.begin(); _iter287 != this->partitions.end(); ++_iter287) { - xfer += (*_iter269).write(oprot); + xfer += (*_iter287).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6529,15 +7319,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other270) { - partitions = other270.partitions; - sd = other270.sd; - __isset = other270.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other288) { + partitions = other288.partitions; + sd = other288.sd; + __isset = other288.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other271) { - partitions = other271.partitions; - sd =
other271.sd; - __isset = other271.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other289) { + partitions = other289.partitions; + sd = other289.sd; + __isset = other289.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -6582,14 +7372,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size272; - ::apache::thrift::protocol::TType _etype275; - xfer += iprot->readListBegin(_etype275, _size272); - this->partitions.resize(_size272); - uint32_t _i276; - for (_i276 = 0; _i276 < _size272; ++_i276) + uint32_t _size290; + ::apache::thrift::protocol::TType _etype293; + xfer += iprot->readListBegin(_etype293, _size290); + this->partitions.resize(_size290); + uint32_t _i294; + for (_i294 = 0; _i294 < _size290; ++_i294) { - xfer += this->partitions[_i276].read(iprot); + xfer += this->partitions[_i294].read(iprot); } xfer += iprot->readListEnd(); } @@ -6618,10 +7408,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size())); - std::vector<Partition> ::const_iterator _iter277; - for (_iter277 = this->partitions.begin(); _iter277 != this->partitions.end(); ++_iter277) + std::vector<Partition> ::const_iterator _iter295; + for (_iter295 = this->partitions.begin(); _iter295 != this->partitions.end(); ++_iter295) { - xfer += (*_iter277).write(oprot); + xfer += (*_iter295).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6638,13 +7428,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other278) { - partitions = other278.partitions; - __isset = other278.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other296) { + partitions = other296.partitions; + __isset = other296.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other279) { - partitions = other279.partitions; - __isset = other279.__isset; +PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other297) { + partitions = other297.partitions; + __isset = other297.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -6681,6 +7471,11 @@ void PartitionSpec::__set_partitionList(const PartitionListComposingSpec& val) { __isset.partitionList = true; } +void PartitionSpec::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6742,6 +7537,14 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6781,6 +7584,11 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer +=
this->partitionList.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6793,24 +7601,27 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.rootPath, b.rootPath); swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec); swap(a.partitionList, b.partitionList); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other280) { - dbName = other280.dbName; - tableName = other280.tableName; - rootPath = other280.rootPath; - sharedSDPartitionSpec = other280.sharedSDPartitionSpec; - partitionList = other280.partitionList; - __isset = other280.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other298) { + dbName = other298.dbName; + tableName = other298.tableName; + rootPath = other298.rootPath; + sharedSDPartitionSpec = other298.sharedSDPartitionSpec; + partitionList = other298.partitionList; + catName = other298.catName; + __isset = other298.__isset; } -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other281) { - dbName = other281.dbName; - tableName = other281.tableName; - rootPath = other281.rootPath; - sharedSDPartitionSpec = other281.sharedSDPartitionSpec; - partitionList = other281.partitionList; - __isset = other281.__isset; +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other299) { + dbName = other299.dbName; + tableName = other299.tableName; + rootPath = other299.rootPath; + sharedSDPartitionSpec = other299.sharedSDPartitionSpec; + partitionList = other299.partitionList; + catName = other299.catName; + __isset = other299.__isset; return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -6821,6 +7632,7 @@ void PartitionSpec::printTo(std::ostream& out) const { out << ", " << "rootPath=" << to_string(rootPath); out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "")); out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -6956,19 +7768,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } -BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other282) { - numTrues = other282.numTrues; - numFalses = other282.numFalses; - numNulls = other282.numNulls; - bitVectors = other282.bitVectors; - __isset = other282.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other300) { + numTrues = other300.numTrues; + numFalses = other300.numFalses; + numNulls = other300.numNulls; + bitVectors = other300.bitVectors; + __isset = other300.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other283) { - numTrues = other283.numTrues; - numFalses = other283.numFalses; - numNulls = other283.numNulls; - bitVectors = other283.bitVectors; - __isset = other283.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other301) { + numTrues = other301.numTrues; + numFalses = other301.numFalses; + numNulls = other301.numNulls; + bitVectors = other301.bitVectors; + __isset = other301.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -7131,21 +7943,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other284) { - lowValue = other284.lowValue; - highValue = other284.highValue; - numNulls = other284.numNulls; - numDVs = other284.numDVs; - bitVectors = other284.bitVectors; - __isset = other284.__isset; -} -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other285) { - lowValue = other285.lowValue; - highValue = other285.highValue; - numNulls = other285.numNulls; - numDVs = other285.numDVs; - bitVectors = other285.bitVectors; - __isset = other285.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other302) { + lowValue = other302.lowValue; + highValue = other302.highValue; + numNulls = other302.numNulls; + numDVs = other302.numDVs; + bitVectors = other302.bitVectors; + __isset = other302.__isset; +} +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other303) { + lowValue = other303.lowValue; + highValue = other303.highValue; + numNulls = other303.numNulls; + numDVs = other303.numDVs; + bitVectors = other303.bitVectors; + __isset = other303.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -7309,21 +8121,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other286) { - lowValue = other286.lowValue; - highValue = other286.highValue; - numNulls = other286.numNulls; - numDVs = other286.numDVs; - bitVectors = other286.bitVectors; - __isset = other286.__isset; -} -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other287) { - lowValue = other287.lowValue; - highValue = other287.highValue; - numNulls = other287.numNulls; - numDVs = other287.numDVs; - bitVectors = other287.bitVectors; - __isset = other287.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other304) { + lowValue = other304.lowValue; + highValue = other304.highValue; + numNulls = other304.numNulls; + numDVs = other304.numDVs; + bitVectors = 
other304.bitVectors; + __isset = other304.__isset; +} +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other305) { + lowValue = other305.lowValue; + highValue = other305.highValue; + numNulls = other305.numNulls; + numDVs = other305.numDVs; + bitVectors = other305.bitVectors; + __isset = other305.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -7489,21 +8301,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other288) { - maxColLen = other288.maxColLen; - avgColLen = other288.avgColLen; - numNulls = other288.numNulls; - numDVs = other288.numDVs; - bitVectors = other288.bitVectors; - __isset = other288.__isset; -} -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other289) { - maxColLen = other289.maxColLen; - avgColLen = other289.avgColLen; - numNulls = other289.numNulls; - numDVs = other289.numDVs; - bitVectors = other289.bitVectors; - __isset = other289.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other306) { + maxColLen = other306.maxColLen; + avgColLen = other306.avgColLen; + numNulls = other306.numNulls; + numDVs = other306.numDVs; + bitVectors = other306.bitVectors; + __isset = other306.__isset; +} +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other307) { + maxColLen = other307.maxColLen; + avgColLen = other307.avgColLen; + numNulls = other307.numNulls; + numDVs = other307.numDVs; + bitVectors = other307.bitVectors; + __isset = other307.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -7649,19 +8461,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other290) { - maxColLen = other290.maxColLen; - avgColLen = other290.avgColLen; - numNulls = other290.numNulls; - bitVectors = other290.bitVectors; - __isset = other290.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other308) { + maxColLen = other308.maxColLen; + avgColLen = other308.avgColLen; + numNulls = other308.numNulls; + bitVectors = other308.bitVectors; + __isset = other308.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other291) { - maxColLen = other291.maxColLen; - avgColLen = other291.avgColLen; - numNulls = other291.numNulls; - bitVectors = other291.bitVectors; - __isset = other291.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other309) { + maxColLen = other309.maxColLen; + avgColLen = other309.avgColLen; + numNulls = other309.numNulls; + bitVectors = other309.bitVectors; + __isset = other309.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -7766,13 +8578,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.scale, b.scale); } -Decimal::Decimal(const Decimal& other292) { - unscaled = other292.unscaled; - scale = other292.scale; +Decimal::Decimal(const Decimal& other310) { + unscaled = other310.unscaled; + scale = other310.scale; } -Decimal& Decimal::operator=(const Decimal& other293) { - unscaled = other293.unscaled; - scale = other293.scale; +Decimal& Decimal::operator=(const Decimal& other311) { + unscaled = other311.unscaled; + scale = other311.scale; return *this; } void 
Decimal::printTo(std::ostream& out) const { @@ -7933,21 +8745,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other294) { - lowValue = other294.lowValue; - highValue = other294.highValue; - numNulls = other294.numNulls; - numDVs = other294.numDVs; - bitVectors = other294.bitVectors; - __isset = other294.__isset; -} -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other295) { - lowValue = other295.lowValue; - highValue = other295.highValue; - numNulls = other295.numNulls; - numDVs = other295.numDVs; - bitVectors = other295.bitVectors; - __isset = other295.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other312) { + lowValue = other312.lowValue; + highValue = other312.highValue; + numNulls = other312.numNulls; + numDVs = other312.numDVs; + bitVectors = other312.bitVectors; + __isset = other312.__isset; +} +DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other313) { + lowValue = other313.lowValue; + highValue = other313.highValue; + numNulls = other313.numNulls; + numDVs = other313.numDVs; + bitVectors = other313.bitVectors; + __isset = other313.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -8033,11 +8845,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other296) { - daysSinceEpoch = other296.daysSinceEpoch; +Date::Date(const Date& other314) { + daysSinceEpoch = other314.daysSinceEpoch; } -Date& Date::operator=(const Date& other297) { - daysSinceEpoch = other297.daysSinceEpoch; +Date& Date::operator=(const Date& other315) { + daysSinceEpoch = other315.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -8197,21 +9009,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other298) { - lowValue = other298.lowValue; - highValue = other298.highValue; - numNulls = other298.numNulls; - numDVs = other298.numDVs; - bitVectors = other298.bitVectors; - __isset = other298.__isset; -} -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other299) { - lowValue = other299.lowValue; - highValue = other299.highValue; - numNulls = other299.numNulls; - numDVs = other299.numDVs; - bitVectors = other299.bitVectors; - __isset = other299.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other316) { + lowValue = other316.lowValue; + highValue = other316.highValue; + numNulls = other316.numNulls; + numDVs = other316.numDVs; + bitVectors = other316.bitVectors; + __isset = other316.__isset; +} +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other317) { + lowValue = other317.lowValue; + highValue = other317.highValue; + numNulls = other317.numNulls; + numDVs = other317.numDVs; + bitVectors = other317.bitVectors; + __isset = other317.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -8397,25 +9209,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other300) { - booleanStats = other300.booleanStats; - longStats = other300.longStats; - doubleStats = other300.doubleStats; - stringStats = 
other300.stringStats; - binaryStats = other300.binaryStats; - decimalStats = other300.decimalStats; - dateStats = other300.dateStats; - __isset = other300.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other301) { - booleanStats = other301.booleanStats; - longStats = other301.longStats; - doubleStats = other301.doubleStats; - stringStats = other301.stringStats; - binaryStats = other301.binaryStats; - decimalStats = other301.decimalStats; - dateStats = other301.dateStats; - __isset = other301.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other318) { + booleanStats = other318.booleanStats; + longStats = other318.longStats; + doubleStats = other318.doubleStats; + stringStats = other318.stringStats; + binaryStats = other318.binaryStats; + decimalStats = other318.decimalStats; + dateStats = other318.dateStats; + __isset = other318.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other319) { + booleanStats = other319.booleanStats; + longStats = other319.longStats; + doubleStats = other319.doubleStats; + stringStats = other319.stringStats; + binaryStats = other319.binaryStats; + decimalStats = other319.decimalStats; + dateStats = other319.dateStats; + __isset = other319.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -8543,15 +9355,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } -ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other302) { - colName = other302.colName; - colType = other302.colType; - statsData = other302.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other320) { + colName = other320.colName; + colType = other320.colType; + statsData = other320.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other303) { - colName = other303.colName; - colType = other303.colType; - statsData = other303.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other321) { + colName = other321.colName; + colType = other321.colType; + statsData = other321.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -8590,6 +9402,11 @@ void ColumnStatisticsDesc::__set_lastAnalyzed(const int64_t val) { __isset.lastAnalyzed = true; } +void ColumnStatisticsDesc::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -8654,6 +9471,14 @@ uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -8699,6 +9524,11 @@ uint32_t ColumnStatisticsDesc::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeI64(this->lastAnalyzed); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ 
-8711,24 +9541,27 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.tableName, b.tableName); swap(a.partName, b.partName); swap(a.lastAnalyzed, b.lastAnalyzed); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other304) { - isTblLevel = other304.isTblLevel; - dbName = other304.dbName; - tableName = other304.tableName; - partName = other304.partName; - lastAnalyzed = other304.lastAnalyzed; - __isset = other304.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other305) { - isTblLevel = other305.isTblLevel; - dbName = other305.dbName; - tableName = other305.tableName; - partName = other305.partName; - lastAnalyzed = other305.lastAnalyzed; - __isset = other305.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other322) { + isTblLevel = other322.isTblLevel; + dbName = other322.dbName; + tableName = other322.tableName; + partName = other322.partName; + lastAnalyzed = other322.lastAnalyzed; + catName = other322.catName; + __isset = other322.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other323) { + isTblLevel = other323.isTblLevel; + dbName = other323.dbName; + tableName = other323.tableName; + partName = other323.partName; + lastAnalyzed = other323.lastAnalyzed; + catName = other323.catName; + __isset = other323.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -8739,6 +9572,7 @@ void ColumnStatisticsDesc::printTo(std::ostream& out) const { out << ", " << "tableName=" << to_string(tableName); out << ", " << "partName="; (__isset.partName ? (out << to_string(partName)) : (out << "")); out << ", " << "lastAnalyzed="; (__isset.lastAnalyzed ? (out << to_string(lastAnalyzed)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -8790,14 +9624,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size306; - ::apache::thrift::protocol::TType _etype309; - xfer += iprot->readListBegin(_etype309, _size306); - this->statsObj.resize(_size306); - uint32_t _i310; - for (_i310 = 0; _i310 < _size306; ++_i310) + uint32_t _size324; + ::apache::thrift::protocol::TType _etype327; + xfer += iprot->readListBegin(_etype327, _size324); + this->statsObj.resize(_size324); + uint32_t _i328; + for (_i328 = 0; _i328 < _size324; ++_i328) { - xfer += this->statsObj[_i310].read(iprot); + xfer += this->statsObj[_i328].read(iprot); } xfer += iprot->readListEnd(); } @@ -8834,10 +9668,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size())); - std::vector<ColumnStatisticsObj> ::const_iterator _iter311; - for (_iter311 = this->statsObj.begin(); _iter311 != this->statsObj.end(); ++_iter311) + std::vector<ColumnStatisticsObj> ::const_iterator _iter329; + for (_iter329 = this->statsObj.begin(); _iter329 != this->statsObj.end(); ++_iter329) { - xfer += (*_iter311).write(oprot); + xfer += (*_iter329).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8854,13 +9688,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { swap(a.statsObj, b.statsObj); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other312) { - statsDesc = other312.statsDesc; - statsObj = other312.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other330) { + statsDesc = other330.statsDesc; + statsObj = other330.statsObj; } -ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other313) { - statsDesc = other313.statsDesc; - statsObj = other313.statsObj; +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other331) { + statsDesc = other331.statsDesc; + statsObj = other331.statsObj; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -8911,14 +9745,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size314; - ::apache::thrift::protocol::TType _etype317; - xfer += iprot->readListBegin(_etype317, _size314); - this->colStats.resize(_size314); - uint32_t _i318; - for (_i318 = 0; _i318 < _size314; ++_i318) + uint32_t _size332; + ::apache::thrift::protocol::TType _etype335; + xfer += iprot->readListBegin(_etype335, _size332); + this->colStats.resize(_size332); + uint32_t _i336; + for (_i336 = 0; _i336 < _size332; ++_i336) { - xfer += this->colStats[_i318].read(iprot); + xfer += this->colStats[_i336].read(iprot); } xfer += iprot->readListEnd(); } @@ -8959,10 +9793,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size())); - std::vector<ColumnStatistics> ::const_iterator _iter319; - for (_iter319 = this->colStats.begin(); _iter319 != this->colStats.end(); ++_iter319) + std::vector<ColumnStatistics> ::const_iterator _iter337; + for (_iter337 = this->colStats.begin(); _iter337 != this->colStats.end(); ++_iter337) { - xfer += (*_iter319).write(oprot); +
xfer += (*_iter337).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8983,13 +9817,13 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -AggrStats::AggrStats(const AggrStats& other320) { - colStats = other320.colStats; - partsFound = other320.partsFound; +AggrStats::AggrStats(const AggrStats& other338) { + colStats = other338.colStats; + partsFound = other338.partsFound; } -AggrStats& AggrStats::operator=(const AggrStats& other321) { - colStats = other321.colStats; - partsFound = other321.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other339) { + colStats = other339.colStats; + partsFound = other339.partsFound; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -9040,14 +9874,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size322; - ::apache::thrift::protocol::TType _etype325; - xfer += iprot->readListBegin(_etype325, _size322); - this->colStats.resize(_size322); - uint32_t _i326; - for (_i326 = 0; _i326 < _size322; ++_i326) + uint32_t _size340; + ::apache::thrift::protocol::TType _etype343; + xfer += iprot->readListBegin(_etype343, _size340); + this->colStats.resize(_size340); + uint32_t _i344; + for (_i344 = 0; _i344 < _size340; ++_i344) { - xfer += this->colStats[_i326].read(iprot); + xfer += this->colStats[_i344].read(iprot); } xfer += iprot->readListEnd(); } @@ -9086,10 +9920,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size())); - std::vector<ColumnStatistics> ::const_iterator _iter327; - for (_iter327 = this->colStats.begin(); _iter327 != this->colStats.end(); ++_iter327) + std::vector<ColumnStatistics> ::const_iterator _iter345; + for (_iter345 = this->colStats.begin(); _iter345 != this->colStats.end(); ++_iter345) { - xfer += (*_iter327).write(oprot); + xfer += (*_iter345).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9112,15 +9946,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.__isset, b.__isset); } -SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other328) { - colStats = other328.colStats; - needMerge = other328.needMerge; - __isset = other328.__isset; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other346) { + colStats = other346.colStats; + needMerge = other346.needMerge; + __isset = other346.__isset; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other329) { - colStats = other329.colStats; - needMerge = other329.needMerge; - __isset = other329.__isset; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other347) { + colStats = other347.colStats; + needMerge = other347.needMerge; + __isset = other347.__isset; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -9169,14 +10003,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size330; - ::apache::thrift::protocol::TType _etype333; - xfer += iprot->readListBegin(_etype333, _size330); - this->fieldSchemas.resize(_size330); - uint32_t _i334; - for (_i334 = 0; _i334 < _size330; ++_i334)
+ uint32_t _size348; + ::apache::thrift::protocol::TType _etype351; + xfer += iprot->readListBegin(_etype351, _size348); + this->fieldSchemas.resize(_size348); + uint32_t _i352; + for (_i352 = 0; _i352 < _size348; ++_i352) { - xfer += this->fieldSchemas[_i334].read(iprot); + xfer += this->fieldSchemas[_i352].read(iprot); } xfer += iprot->readListEnd(); } @@ -9189,17 +10023,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size335; - ::apache::thrift::protocol::TType _ktype336; - ::apache::thrift::protocol::TType _vtype337; - xfer += iprot->readMapBegin(_ktype336, _vtype337, _size335); - uint32_t _i339; - for (_i339 = 0; _i339 < _size335; ++_i339) + uint32_t _size353; + ::apache::thrift::protocol::TType _ktype354; + ::apache::thrift::protocol::TType _vtype355; + xfer += iprot->readMapBegin(_ktype354, _vtype355, _size353); + uint32_t _i357; + for (_i357 = 0; _i357 < _size353; ++_i357) { - std::string _key340; - xfer += iprot->readString(_key340); - std::string& _val341 = this->properties[_key340]; - xfer += iprot->readString(_val341); + std::string _key358; + xfer += iprot->readString(_key358); + std::string& _val359 = this->properties[_key358]; + xfer += iprot->readString(_val359); } xfer += iprot->readMapEnd(); } @@ -9228,10 +10062,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size())); - std::vector<FieldSchema> ::const_iterator _iter342; - for (_iter342 = this->fieldSchemas.begin(); _iter342 != this->fieldSchemas.end(); ++_iter342) + std::vector<FieldSchema> ::const_iterator _iter360; + for (_iter360 = this->fieldSchemas.begin(); _iter360 != this->fieldSchemas.end(); ++_iter360) { - xfer += (*_iter342).write(oprot); + xfer += (*_iter360).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9240,11 +10074,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size())); - std::map<std::string, std::string> ::const_iterator _iter343; - for (_iter343 = this->properties.begin(); _iter343 != this->properties.end(); ++_iter343) + std::map<std::string, std::string> ::const_iterator _iter361; + for (_iter361 = this->properties.begin(); _iter361 != this->properties.end(); ++_iter361) { - xfer += oprot->writeString(_iter343->first); - xfer += oprot->writeString(_iter343->second); + xfer += oprot->writeString(_iter361->first); + xfer += oprot->writeString(_iter361->second); } xfer += oprot->writeMapEnd(); } @@ -9262,15 +10096,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other344) { - fieldSchemas = other344.fieldSchemas; - properties = other344.properties; - __isset = other344.__isset; +Schema::Schema(const Schema& other362) { + fieldSchemas = other362.fieldSchemas; + properties = other362.properties; + __isset = other362.__isset; } -Schema& Schema::operator=(const Schema& other345) { - fieldSchemas = other345.fieldSchemas; - properties = other345.properties; - __isset = other345.__isset; +Schema& Schema::operator=(const Schema& other363) { + fieldSchemas = other363.fieldSchemas; + properties = other363.properties; + __isset = other363.__isset;
return *this; } void Schema::printTo(std::ostream& out) const { @@ -9315,17 +10149,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size346; - ::apache::thrift::protocol::TType _ktype347; - ::apache::thrift::protocol::TType _vtype348; - xfer += iprot->readMapBegin(_ktype347, _vtype348, _size346); - uint32_t _i350; - for (_i350 = 0; _i350 < _size346; ++_i350) + uint32_t _size364; + ::apache::thrift::protocol::TType _ktype365; + ::apache::thrift::protocol::TType _vtype366; + xfer += iprot->readMapBegin(_ktype365, _vtype366, _size364); + uint32_t _i368; + for (_i368 = 0; _i368 < _size364; ++_i368) { - std::string _key351; - xfer += iprot->readString(_key351); - std::string& _val352 = this->properties[_key351]; - xfer += iprot->readString(_val352); + std::string _key369; + xfer += iprot->readString(_key369); + std::string& _val370 = this->properties[_key369]; + xfer += iprot->readString(_val370); } xfer += iprot->readMapEnd(); } @@ -9354,11 +10188,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size())); - std::map<std::string, std::string> ::const_iterator _iter353; - for (_iter353 = this->properties.begin(); _iter353 != this->properties.end(); ++_iter353) + std::map<std::string, std::string> ::const_iterator _iter371; + for (_iter371 = this->properties.begin(); _iter371 != this->properties.end(); ++_iter371) { - xfer += oprot->writeString(_iter353->first); - xfer += oprot->writeString(_iter353->second); + xfer += oprot->writeString(_iter371->first); + xfer += oprot->writeString(_iter371->second); } xfer += oprot->writeMapEnd(); } @@ -9375,13 +10209,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other354) { - properties = other354.properties; - __isset = other354.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other372) { + properties = other372.properties; + __isset = other372.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other355) { - properties = other355.properties; - __isset = other355.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other373) { + properties = other373.properties; + __isset = other373.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -9404,6 +10238,11 @@ void PrimaryKeysRequest::__set_tbl_name(const std::string& val) { this->tbl_name = val; } +void PrimaryKeysRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PrimaryKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9443,6 +10282,14 @@ uint32_t PrimaryKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9472,6 +10319,11 @@ uint32_t PrimaryKeysRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer +=
oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9481,15 +10333,21 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) { using ::std::swap; swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other356) { - db_name = other356.db_name; - tbl_name = other356.tbl_name; +PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other374) { + db_name = other374.db_name; + tbl_name = other374.tbl_name; + catName = other374.catName; + __isset = other374.__isset; } -PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other357) { - db_name = other357.db_name; - tbl_name = other357.tbl_name; +PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other375) { + db_name = other375.db_name; + tbl_name = other375.tbl_name; + catName = other375.catName; + __isset = other375.__isset; return *this; } void PrimaryKeysRequest::printTo(std::ostream& out) const { @@ -9497,6 +10355,7 @@ void PrimaryKeysRequest::printTo(std::ostream& out) const { out << "PrimaryKeysRequest("; out << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -9535,14 +10394,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size358; - ::apache::thrift::protocol::TType _etype361; - xfer += iprot->readListBegin(_etype361, _size358); - this->primaryKeys.resize(_size358); - uint32_t _i362; - for (_i362 = 0; _i362 < _size358; ++_i362) + uint32_t _size376; + ::apache::thrift::protocol::TType _etype379; + xfer += iprot->readListBegin(_etype379, _size376); + this->primaryKeys.resize(_size376); + uint32_t _i380; + for (_i380 = 0; _i380 < _size376; ++_i380) { - xfer += this->primaryKeys[_i362].read(iprot); + xfer += this->primaryKeys[_i380].read(iprot); } xfer += iprot->readListEnd(); } @@ -9573,10 +10432,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size())); - std::vector<SQLPrimaryKey> ::const_iterator _iter363; - for (_iter363 = this->primaryKeys.begin(); _iter363 != this->primaryKeys.end(); ++_iter363) + std::vector<SQLPrimaryKey> ::const_iterator _iter381; + for (_iter381 = this->primaryKeys.begin(); _iter381 != this->primaryKeys.end(); ++_iter381) { - xfer += (*_iter363).write(oprot); + xfer += (*_iter381).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9592,11 +10451,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) { swap(a.primaryKeys, b.primaryKeys); } -PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other364) { - primaryKeys = other364.primaryKeys; +PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other382) { + primaryKeys = other382.primaryKeys; } -PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse&
other365) { - primaryKeys = other365.primaryKeys; +PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other383) { + primaryKeys = other383.primaryKeys; return *this; } void PrimaryKeysResponse::printTo(std::ostream& out) const { @@ -9627,6 +10486,11 @@ void ForeignKeysRequest::__set_foreign_tbl_name(const std::string& val) { this->foreign_tbl_name = val; } +void ForeignKeysRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ForeignKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9680,6 +10544,14 @@ uint32_t ForeignKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9713,6 +10585,11 @@ uint32_t ForeignKeysRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->foreign_tbl_name); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9724,22 +10601,25 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) { swap(a.parent_tbl_name, b.parent_tbl_name); swap(a.foreign_db_name, b.foreign_db_name); swap(a.foreign_tbl_name, b.foreign_tbl_name); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other366) { - parent_db_name = other366.parent_db_name; - parent_tbl_name = other366.parent_tbl_name; - foreign_db_name = other366.foreign_db_name; - foreign_tbl_name = other366.foreign_tbl_name; - __isset = other366.__isset; -} -ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other367) { - parent_db_name = other367.parent_db_name; - parent_tbl_name = other367.parent_tbl_name; - foreign_db_name = other367.foreign_db_name; - foreign_tbl_name = other367.foreign_tbl_name; - __isset = other367.__isset; +ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other384) { + parent_db_name = other384.parent_db_name; + parent_tbl_name = other384.parent_tbl_name; + foreign_db_name = other384.foreign_db_name; + foreign_tbl_name = other384.foreign_tbl_name; + catName = other384.catName; + __isset = other384.__isset; +} +ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other385) { + parent_db_name = other385.parent_db_name; + parent_tbl_name = other385.parent_tbl_name; + foreign_db_name = other385.foreign_db_name; + foreign_tbl_name = other385.foreign_tbl_name; + catName = other385.catName; + __isset = other385.__isset; return *this; } void ForeignKeysRequest::printTo(std::ostream& out) const { @@ -9749,6 +10629,7 @@ void ForeignKeysRequest::printTo(std::ostream& out) const { out << ", " << "parent_tbl_name=" << to_string(parent_tbl_name); out << ", " << "foreign_db_name=" << to_string(foreign_db_name); out << ", " << "foreign_tbl_name=" << to_string(foreign_tbl_name); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -9787,14 +10668,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size368; - ::apache::thrift::protocol::TType _etype371; - xfer += iprot->readListBegin(_etype371, _size368); - this->foreignKeys.resize(_size368); - uint32_t _i372; - for (_i372 = 0; _i372 < _size368; ++_i372) + uint32_t _size386; + ::apache::thrift::protocol::TType _etype389; + xfer += iprot->readListBegin(_etype389, _size386); + this->foreignKeys.resize(_size386); + uint32_t _i390; + for (_i390 = 0; _i390 < _size386; ++_i390) { - xfer += this->foreignKeys[_i372].read(iprot); + xfer += this->foreignKeys[_i390].read(iprot); } xfer += iprot->readListEnd(); } @@ -9825,10 +10706,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter373; - for (_iter373 = this->foreignKeys.begin(); _iter373 != this->foreignKeys.end(); ++_iter373) + std::vector ::const_iterator _iter391; + for (_iter391 = this->foreignKeys.begin(); _iter391 != this->foreignKeys.end(); ++_iter391) { - xfer += (*_iter373).write(oprot); + xfer += (*_iter391).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9844,11 +10725,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) { swap(a.foreignKeys, b.foreignKeys); } -ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other374) { - foreignKeys = other374.foreignKeys; +ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other392) { + foreignKeys = other392.foreignKeys; } -ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other375) { - foreignKeys = other375.foreignKeys; +ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other393) { + foreignKeys = other393.foreignKeys; return *this; } void ForeignKeysResponse::printTo(std::ostream& out) const { @@ -9863,6 +10744,10 @@ UniqueConstraintsRequest::~UniqueConstraintsRequest() throw() { } +void UniqueConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void UniqueConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -9883,6 +10768,7 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -9896,13 +10782,21 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -9919,6 +10813,8 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); 
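
The constraint-request structs take a stricter route than PrimaryKeysRequest and ForeignKeysRequest above: here catName is a required field, so the generated reader now rejects any message that omits it, with the same INVALID_DATA check the required db_name and tbl_name fields get just below. A minimal standalone sketch of that isset-and-validate pattern (the SketchRequest type and deserialize helper are hypothetical; the generated code above does the same thing through Thrift's TProtocol and TProtocolException):

    #include <stdexcept>
    #include <string>

    // Hypothetical mirror of the generated required-field read pattern.
    struct SketchRequest {
      std::string catName;   // field 1, required as of this change
      std::string db_name;   // field 2 (previously field 1)
      std::string tbl_name;  // field 3 (previously field 2)
    };

    // Stands in for the generated read(): each decoded field flips an isset
    // flag, and any missing required field surfaces as a protocol error.
    SketchRequest deserialize(const std::string* cat, const std::string* db,
                              const std::string* tbl) {
      bool isset_catName = false, isset_db_name = false, isset_tbl_name = false;
      SketchRequest r;
      if (cat) { r.catName  = *cat; isset_catName  = true; }
      if (db)  { r.db_name  = *db;  isset_db_name  = true; }
      if (tbl) { r.tbl_name = *tbl; isset_tbl_name = true; }
      if (!isset_catName || !isset_db_name || !isset_tbl_name)
        throw std::runtime_error("INVALID_DATA: missing required field");
      return r;
    }

One consequence follows from the renumbered cases above: a pre-upgrade UniqueConstraintsRequest carried its two strings at field ids 1 and 2, so a new reader decodes them into catName and db_name and then fails the tbl_name check.
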
if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -9931,11 +10827,15 @@ uint32_t UniqueConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("UniqueConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -9946,23 +10846,27 @@ uint32_t UniqueConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other376) { - db_name = other376.db_name; - tbl_name = other376.tbl_name; +UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other394) { + catName = other394.catName; + db_name = other394.db_name; + tbl_name = other394.tbl_name; } -UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other377) { - db_name = other377.db_name; - tbl_name = other377.tbl_name; +UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other395) { + catName = other395.catName; + db_name = other395.db_name; + tbl_name = other395.tbl_name; return *this; } void UniqueConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "UniqueConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -10002,14 +10906,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size378; - ::apache::thrift::protocol::TType _etype381; - xfer += iprot->readListBegin(_etype381, _size378); - this->uniqueConstraints.resize(_size378); - uint32_t _i382; - for (_i382 = 0; _i382 < _size378; ++_i382) + uint32_t _size396; + ::apache::thrift::protocol::TType _etype399; + xfer += iprot->readListBegin(_etype399, _size396); + this->uniqueConstraints.resize(_size396); + uint32_t _i400; + for (_i400 = 0; _i400 < _size396; ++_i400) { - xfer += this->uniqueConstraints[_i382].read(iprot); + xfer += this->uniqueConstraints[_i400].read(iprot); } xfer += iprot->readListEnd(); } @@ -10040,10 +10944,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter383; - for (_iter383 = this->uniqueConstraints.begin(); 
_iter383 != this->uniqueConstraints.end(); ++_iter383) + std::vector ::const_iterator _iter401; + for (_iter401 = this->uniqueConstraints.begin(); _iter401 != this->uniqueConstraints.end(); ++_iter401) { - xfer += (*_iter383).write(oprot); + xfer += (*_iter401).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10059,11 +10963,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) { swap(a.uniqueConstraints, b.uniqueConstraints); } -UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other384) { - uniqueConstraints = other384.uniqueConstraints; +UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other402) { + uniqueConstraints = other402.uniqueConstraints; } -UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other385) { - uniqueConstraints = other385.uniqueConstraints; +UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other403) { + uniqueConstraints = other403.uniqueConstraints; return *this; } void UniqueConstraintsResponse::printTo(std::ostream& out) const { @@ -10078,6 +10982,10 @@ NotNullConstraintsRequest::~NotNullConstraintsRequest() throw() { } +void NotNullConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void NotNullConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -10098,6 +11006,7 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -10111,13 +11020,21 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -10134,6 +11051,8 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -10146,11 +11065,15 @@ uint32_t NotNullConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("NotNullConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -10161,23 +11084,27 @@ 
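
Note what the renumbering in these request structs means on the wire: Thrift's binary protocols identify a field by numeric id and type, not by name, so moving db_name from id 1 to id 2 (and tbl_name from 2 to 3) changes the serialized layout of every constraint request that made catName required. A toy illustration of the resulting skew, using a bare (id, value) map in place of real Thrift framing (which also carries a type byte per field):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // New writer: catName claimed field id 1, shifting the old fields up.
      std::map<int16_t, std::string> newLayout = {
          {1, "hive"},      // catName
          {2, "default"},   // db_name (was id 1)
          {3, "web_logs"},  // tbl_name (was id 2)
      };
      // A pre-upgrade reader still binds id 1 to db_name and id 2 to tbl_name,
      // so it would decode db_name as "hive" and tbl_name as "default".
      std::cout << "old reader sees db_name=" << newLayout[1]
                << ", tbl_name=" << newLayout[2] << "\n";
      return 0;
    }

That skew is presumably tolerable here because these requests are built fresh for each RPC rather than persisted; the other structs in this patch take the safer route of appending catName at the end as an optional field.
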
uint32_t NotNullConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other386) { - db_name = other386.db_name; - tbl_name = other386.tbl_name; +NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other404) { + catName = other404.catName; + db_name = other404.db_name; + tbl_name = other404.tbl_name; } -NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other387) { - db_name = other387.db_name; - tbl_name = other387.tbl_name; +NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other405) { + catName = other405.catName; + db_name = other405.db_name; + tbl_name = other405.tbl_name; return *this; } void NotNullConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "NotNullConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -10217,14 +11144,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size388; - ::apache::thrift::protocol::TType _etype391; - xfer += iprot->readListBegin(_etype391, _size388); - this->notNullConstraints.resize(_size388); - uint32_t _i392; - for (_i392 = 0; _i392 < _size388; ++_i392) + uint32_t _size406; + ::apache::thrift::protocol::TType _etype409; + xfer += iprot->readListBegin(_etype409, _size406); + this->notNullConstraints.resize(_size406); + uint32_t _i410; + for (_i410 = 0; _i410 < _size406; ++_i410) { - xfer += this->notNullConstraints[_i392].read(iprot); + xfer += this->notNullConstraints[_i410].read(iprot); } xfer += iprot->readListEnd(); } @@ -10255,10 +11182,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter393; - for (_iter393 = this->notNullConstraints.begin(); _iter393 != this->notNullConstraints.end(); ++_iter393) + std::vector ::const_iterator _iter411; + for (_iter411 = this->notNullConstraints.begin(); _iter411 != this->notNullConstraints.end(); ++_iter411) { - xfer += (*_iter393).write(oprot); + xfer += (*_iter411).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10274,11 +11201,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) { swap(a.notNullConstraints, b.notNullConstraints); } -NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other394) { - notNullConstraints = other394.notNullConstraints; +NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other412) { + notNullConstraints = other412.notNullConstraints; } -NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other395) { - notNullConstraints = other395.notNullConstraints; +NotNullConstraintsResponse& 
NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other413) { + notNullConstraints = other413.notNullConstraints; return *this; } void NotNullConstraintsResponse::printTo(std::ostream& out) const { @@ -10293,6 +11220,10 @@ DefaultConstraintsRequest::~DefaultConstraintsRequest() throw() { } +void DefaultConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void DefaultConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -10313,6 +11244,7 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -10326,13 +11258,21 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -10349,6 +11289,8 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -10361,11 +11303,15 @@ uint32_t DefaultConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("DefaultConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -10376,23 +11322,27 @@ uint32_t DefaultConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(DefaultConstraintsRequest &a, DefaultConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other396) { - db_name = other396.db_name; - tbl_name = other396.tbl_name; +DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other414) { + catName = other414.catName; + db_name = other414.db_name; + tbl_name = other414.tbl_name; } -DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other397) { - db_name = other397.db_name; - tbl_name = other397.tbl_name; +DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other415) { + catName = other415.catName; + db_name = 
other415.db_name; + tbl_name = other415.tbl_name; return *this; } void DefaultConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "DefaultConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -10432,14 +11382,14 @@ uint32_t DefaultConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size398; - ::apache::thrift::protocol::TType _etype401; - xfer += iprot->readListBegin(_etype401, _size398); - this->defaultConstraints.resize(_size398); - uint32_t _i402; - for (_i402 = 0; _i402 < _size398; ++_i402) + uint32_t _size416; + ::apache::thrift::protocol::TType _etype419; + xfer += iprot->readListBegin(_etype419, _size416); + this->defaultConstraints.resize(_size416); + uint32_t _i420; + for (_i420 = 0; _i420 < _size416; ++_i420) { - xfer += this->defaultConstraints[_i402].read(iprot); + xfer += this->defaultConstraints[_i420].read(iprot); } xfer += iprot->readListEnd(); } @@ -10470,10 +11420,10 @@ uint32_t DefaultConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter403; - for (_iter403 = this->defaultConstraints.begin(); _iter403 != this->defaultConstraints.end(); ++_iter403) + std::vector ::const_iterator _iter421; + for (_iter421 = this->defaultConstraints.begin(); _iter421 != this->defaultConstraints.end(); ++_iter421) { - xfer += (*_iter403).write(oprot); + xfer += (*_iter421).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10489,11 +11439,11 @@ void swap(DefaultConstraintsResponse &a, DefaultConstraintsResponse &b) { swap(a.defaultConstraints, b.defaultConstraints); } -DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other404) { - defaultConstraints = other404.defaultConstraints; +DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other422) { + defaultConstraints = other422.defaultConstraints; } -DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other405) { - defaultConstraints = other405.defaultConstraints; +DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other423) { + defaultConstraints = other423.defaultConstraints; return *this; } void DefaultConstraintsResponse::printTo(std::ostream& out) const { @@ -10508,6 +11458,10 @@ CheckConstraintsRequest::~CheckConstraintsRequest() throw() { } +void CheckConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void CheckConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -10528,6 +11482,7 @@ uint32_t CheckConstraintsRequest::read(::apache::thrift::protocol::TProtocol* ip using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -10541,13 +11496,21 @@ uint32_t CheckConstraintsRequest::read(::apache::thrift::protocol::TProtocol* ip { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + 
isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -10564,6 +11527,8 @@ uint32_t CheckConstraintsRequest::read(::apache::thrift::protocol::TProtocol* ip xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -10576,11 +11541,15 @@ uint32_t CheckConstraintsRequest::write(::apache::thrift::protocol::TProtocol* o apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("CheckConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -10591,23 +11560,27 @@ uint32_t CheckConstraintsRequest::write(::apache::thrift::protocol::TProtocol* o void swap(CheckConstraintsRequest &a, CheckConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -CheckConstraintsRequest::CheckConstraintsRequest(const CheckConstraintsRequest& other406) { - db_name = other406.db_name; - tbl_name = other406.tbl_name; +CheckConstraintsRequest::CheckConstraintsRequest(const CheckConstraintsRequest& other424) { + catName = other424.catName; + db_name = other424.db_name; + tbl_name = other424.tbl_name; } -CheckConstraintsRequest& CheckConstraintsRequest::operator=(const CheckConstraintsRequest& other407) { - db_name = other407.db_name; - tbl_name = other407.tbl_name; +CheckConstraintsRequest& CheckConstraintsRequest::operator=(const CheckConstraintsRequest& other425) { + catName = other425.catName; + db_name = other425.db_name; + tbl_name = other425.tbl_name; return *this; } void CheckConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "CheckConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -10647,14 +11620,14 @@ uint32_t CheckConstraintsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraints.clear(); - uint32_t _size408; - ::apache::thrift::protocol::TType _etype411; - xfer += iprot->readListBegin(_etype411, _size408); - this->checkConstraints.resize(_size408); - uint32_t _i412; - for (_i412 = 0; _i412 < _size408; ++_i412) + uint32_t _size426; + ::apache::thrift::protocol::TType _etype429; + xfer += iprot->readListBegin(_etype429, _size426); + this->checkConstraints.resize(_size426); + uint32_t _i430; + 
for (_i430 = 0; _i430 < _size426; ++_i430) { - xfer += this->checkConstraints[_i412].read(iprot); + xfer += this->checkConstraints[_i430].read(iprot); } xfer += iprot->readListEnd(); } @@ -10685,10 +11658,10 @@ uint32_t CheckConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraints.size())); - std::vector ::const_iterator _iter413; - for (_iter413 = this->checkConstraints.begin(); _iter413 != this->checkConstraints.end(); ++_iter413) + std::vector ::const_iterator _iter431; + for (_iter431 = this->checkConstraints.begin(); _iter431 != this->checkConstraints.end(); ++_iter431) { - xfer += (*_iter413).write(oprot); + xfer += (*_iter431).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10704,11 +11677,11 @@ void swap(CheckConstraintsResponse &a, CheckConstraintsResponse &b) { swap(a.checkConstraints, b.checkConstraints); } -CheckConstraintsResponse::CheckConstraintsResponse(const CheckConstraintsResponse& other414) { - checkConstraints = other414.checkConstraints; +CheckConstraintsResponse::CheckConstraintsResponse(const CheckConstraintsResponse& other432) { + checkConstraints = other432.checkConstraints; } -CheckConstraintsResponse& CheckConstraintsResponse::operator=(const CheckConstraintsResponse& other415) { - checkConstraints = other415.checkConstraints; +CheckConstraintsResponse& CheckConstraintsResponse::operator=(const CheckConstraintsResponse& other433) { + checkConstraints = other433.checkConstraints; return *this; } void CheckConstraintsResponse::printTo(std::ostream& out) const { @@ -10735,6 +11708,11 @@ void DropConstraintRequest::__set_constraintname(const std::string& val) { this->constraintname = val; } +void DropConstraintRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t DropConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -10783,6 +11761,14 @@ uint32_t DropConstraintRequest::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -10818,6 +11804,11 @@ uint32_t DropConstraintRequest::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeString(this->constraintname); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -10828,17 +11819,23 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) { swap(a.dbname, b.dbname); swap(a.tablename, b.tablename); swap(a.constraintname, b.constraintname); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other416) { - dbname = other416.dbname; - tablename = other416.tablename; - constraintname = other416.constraintname; +DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other434) { + dbname = other434.dbname; + tablename = 
other434.tablename; + constraintname = other434.constraintname; + catName = other434.catName; + __isset = other434.__isset; } -DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other417) { - dbname = other417.dbname; - tablename = other417.tablename; - constraintname = other417.constraintname; +DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other435) { + dbname = other435.dbname; + tablename = other435.tablename; + constraintname = other435.constraintname; + catName = other435.catName; + __isset = other435.__isset; return *this; } void DropConstraintRequest::printTo(std::ostream& out) const { @@ -10847,6 +11844,7 @@ void DropConstraintRequest::printTo(std::ostream& out) const { out << "dbname=" << to_string(dbname); out << ", " << "tablename=" << to_string(tablename); out << ", " << "constraintname=" << to_string(constraintname); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -10885,14 +11883,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeyCols.clear(); - uint32_t _size418; - ::apache::thrift::protocol::TType _etype421; - xfer += iprot->readListBegin(_etype421, _size418); - this->primaryKeyCols.resize(_size418); - uint32_t _i422; - for (_i422 = 0; _i422 < _size418; ++_i422) + uint32_t _size436; + ::apache::thrift::protocol::TType _etype439; + xfer += iprot->readListBegin(_etype439, _size436); + this->primaryKeyCols.resize(_size436); + uint32_t _i440; + for (_i440 = 0; _i440 < _size436; ++_i440) { - xfer += this->primaryKeyCols[_i422].read(iprot); + xfer += this->primaryKeyCols[_i440].read(iprot); } xfer += iprot->readListEnd(); } @@ -10923,10 +11921,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("primaryKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeyCols.size())); - std::vector ::const_iterator _iter423; - for (_iter423 = this->primaryKeyCols.begin(); _iter423 != this->primaryKeyCols.end(); ++_iter423) + std::vector ::const_iterator _iter441; + for (_iter441 = this->primaryKeyCols.begin(); _iter441 != this->primaryKeyCols.end(); ++_iter441) { - xfer += (*_iter423).write(oprot); + xfer += (*_iter441).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10942,11 +11940,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) { swap(a.primaryKeyCols, b.primaryKeyCols); } -AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other424) { - primaryKeyCols = other424.primaryKeyCols; +AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other442) { + primaryKeyCols = other442.primaryKeyCols; } -AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other425) { - primaryKeyCols = other425.primaryKeyCols; +AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other443) { + primaryKeyCols = other443.primaryKeyCols; return *this; } void AddPrimaryKeyRequest::printTo(std::ostream& out) const { @@ -10991,14 +11989,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeyCols.clear(); - uint32_t _size426; - ::apache::thrift::protocol::TType _etype429; - xfer += iprot->readListBegin(_etype429, 
_size426); - this->foreignKeyCols.resize(_size426); - uint32_t _i430; - for (_i430 = 0; _i430 < _size426; ++_i430) + uint32_t _size444; + ::apache::thrift::protocol::TType _etype447; + xfer += iprot->readListBegin(_etype447, _size444); + this->foreignKeyCols.resize(_size444); + uint32_t _i448; + for (_i448 = 0; _i448 < _size444; ++_i448) { - xfer += this->foreignKeyCols[_i430].read(iprot); + xfer += this->foreignKeyCols[_i448].read(iprot); } xfer += iprot->readListEnd(); } @@ -11029,10 +12027,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeyCols.size())); - std::vector ::const_iterator _iter431; - for (_iter431 = this->foreignKeyCols.begin(); _iter431 != this->foreignKeyCols.end(); ++_iter431) + std::vector ::const_iterator _iter449; + for (_iter449 = this->foreignKeyCols.begin(); _iter449 != this->foreignKeyCols.end(); ++_iter449) { - xfer += (*_iter431).write(oprot); + xfer += (*_iter449).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11048,11 +12046,11 @@ void swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) { swap(a.foreignKeyCols, b.foreignKeyCols); } -AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other432) { - foreignKeyCols = other432.foreignKeyCols; +AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other450) { + foreignKeyCols = other450.foreignKeyCols; } -AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other433) { - foreignKeyCols = other433.foreignKeyCols; +AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other451) { + foreignKeyCols = other451.foreignKeyCols; return *this; } void AddForeignKeyRequest::printTo(std::ostream& out) const { @@ -11097,14 +12095,14 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraintCols.clear(); - uint32_t _size434; - ::apache::thrift::protocol::TType _etype437; - xfer += iprot->readListBegin(_etype437, _size434); - this->uniqueConstraintCols.resize(_size434); - uint32_t _i438; - for (_i438 = 0; _i438 < _size434; ++_i438) + uint32_t _size452; + ::apache::thrift::protocol::TType _etype455; + xfer += iprot->readListBegin(_etype455, _size452); + this->uniqueConstraintCols.resize(_size452); + uint32_t _i456; + for (_i456 = 0; _i456 < _size452; ++_i456) { - xfer += this->uniqueConstraintCols[_i438].read(iprot); + xfer += this->uniqueConstraintCols[_i456].read(iprot); } xfer += iprot->readListEnd(); } @@ -11135,10 +12133,10 @@ uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraintCols.size())); - std::vector ::const_iterator _iter439; - for (_iter439 = this->uniqueConstraintCols.begin(); _iter439 != this->uniqueConstraintCols.end(); ++_iter439) + std::vector ::const_iterator _iter457; + for (_iter457 = this->uniqueConstraintCols.begin(); _iter457 != this->uniqueConstraintCols.end(); ++_iter457) { - xfer += (*_iter439).write(oprot); + xfer += (*_iter457).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11154,11 +12152,11 @@ void swap(AddUniqueConstraintRequest &a, 
AddUniqueConstraintRequest &b) { swap(a.uniqueConstraintCols, b.uniqueConstraintCols); } -AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other440) { - uniqueConstraintCols = other440.uniqueConstraintCols; +AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other458) { + uniqueConstraintCols = other458.uniqueConstraintCols; } -AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other441) { - uniqueConstraintCols = other441.uniqueConstraintCols; +AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other459) { + uniqueConstraintCols = other459.uniqueConstraintCols; return *this; } void AddUniqueConstraintRequest::printTo(std::ostream& out) const { @@ -11203,14 +12201,14 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraintCols.clear(); - uint32_t _size442; - ::apache::thrift::protocol::TType _etype445; - xfer += iprot->readListBegin(_etype445, _size442); - this->notNullConstraintCols.resize(_size442); - uint32_t _i446; - for (_i446 = 0; _i446 < _size442; ++_i446) + uint32_t _size460; + ::apache::thrift::protocol::TType _etype463; + xfer += iprot->readListBegin(_etype463, _size460); + this->notNullConstraintCols.resize(_size460); + uint32_t _i464; + for (_i464 = 0; _i464 < _size460; ++_i464) { - xfer += this->notNullConstraintCols[_i446].read(iprot); + xfer += this->notNullConstraintCols[_i464].read(iprot); } xfer += iprot->readListEnd(); } @@ -11241,10 +12239,10 @@ uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraintCols.size())); - std::vector ::const_iterator _iter447; - for (_iter447 = this->notNullConstraintCols.begin(); _iter447 != this->notNullConstraintCols.end(); ++_iter447) + std::vector ::const_iterator _iter465; + for (_iter465 = this->notNullConstraintCols.begin(); _iter465 != this->notNullConstraintCols.end(); ++_iter465) { - xfer += (*_iter447).write(oprot); + xfer += (*_iter465).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11260,11 +12258,11 @@ void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) { swap(a.notNullConstraintCols, b.notNullConstraintCols); } -AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other448) { - notNullConstraintCols = other448.notNullConstraintCols; +AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other466) { + notNullConstraintCols = other466.notNullConstraintCols; } -AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other449) { - notNullConstraintCols = other449.notNullConstraintCols; +AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other467) { + notNullConstraintCols = other467.notNullConstraintCols; return *this; } void AddNotNullConstraintRequest::printTo(std::ostream& out) const { @@ -11309,14 +12307,14 @@ uint32_t AddDefaultConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraintCols.clear(); - uint32_t _size450; - 
::apache::thrift::protocol::TType _etype453; - xfer += iprot->readListBegin(_etype453, _size450); - this->defaultConstraintCols.resize(_size450); - uint32_t _i454; - for (_i454 = 0; _i454 < _size450; ++_i454) + uint32_t _size468; + ::apache::thrift::protocol::TType _etype471; + xfer += iprot->readListBegin(_etype471, _size468); + this->defaultConstraintCols.resize(_size468); + uint32_t _i472; + for (_i472 = 0; _i472 < _size468; ++_i472) { - xfer += this->defaultConstraintCols[_i454].read(iprot); + xfer += this->defaultConstraintCols[_i472].read(iprot); } xfer += iprot->readListEnd(); } @@ -11347,10 +12345,10 @@ uint32_t AddDefaultConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("defaultConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraintCols.size())); - std::vector ::const_iterator _iter455; - for (_iter455 = this->defaultConstraintCols.begin(); _iter455 != this->defaultConstraintCols.end(); ++_iter455) + std::vector ::const_iterator _iter473; + for (_iter473 = this->defaultConstraintCols.begin(); _iter473 != this->defaultConstraintCols.end(); ++_iter473) { - xfer += (*_iter455).write(oprot); + xfer += (*_iter473).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11366,11 +12364,11 @@ void swap(AddDefaultConstraintRequest &a, AddDefaultConstraintRequest &b) { swap(a.defaultConstraintCols, b.defaultConstraintCols); } -AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other456) { - defaultConstraintCols = other456.defaultConstraintCols; +AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other474) { + defaultConstraintCols = other474.defaultConstraintCols; } -AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other457) { - defaultConstraintCols = other457.defaultConstraintCols; +AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other475) { + defaultConstraintCols = other475.defaultConstraintCols; return *this; } void AddDefaultConstraintRequest::printTo(std::ostream& out) const { @@ -11415,14 +12413,14 @@ uint32_t AddCheckConstraintRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->checkConstraintCols.clear(); - uint32_t _size458; - ::apache::thrift::protocol::TType _etype461; - xfer += iprot->readListBegin(_etype461, _size458); - this->checkConstraintCols.resize(_size458); - uint32_t _i462; - for (_i462 = 0; _i462 < _size458; ++_i462) + uint32_t _size476; + ::apache::thrift::protocol::TType _etype479; + xfer += iprot->readListBegin(_etype479, _size476); + this->checkConstraintCols.resize(_size476); + uint32_t _i480; + for (_i480 = 0; _i480 < _size476; ++_i480) { - xfer += this->checkConstraintCols[_i462].read(iprot); + xfer += this->checkConstraintCols[_i480].read(iprot); } xfer += iprot->readListEnd(); } @@ -11453,10 +12451,10 @@ uint32_t AddCheckConstraintRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("checkConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->checkConstraintCols.size())); - std::vector ::const_iterator _iter463; - for (_iter463 = this->checkConstraintCols.begin(); _iter463 != this->checkConstraintCols.end(); ++_iter463) + std::vector 
::const_iterator _iter481; + for (_iter481 = this->checkConstraintCols.begin(); _iter481 != this->checkConstraintCols.end(); ++_iter481) { - xfer += (*_iter463).write(oprot); + xfer += (*_iter481).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11472,11 +12470,11 @@ void swap(AddCheckConstraintRequest &a, AddCheckConstraintRequest &b) { swap(a.checkConstraintCols, b.checkConstraintCols); } -AddCheckConstraintRequest::AddCheckConstraintRequest(const AddCheckConstraintRequest& other464) { - checkConstraintCols = other464.checkConstraintCols; +AddCheckConstraintRequest::AddCheckConstraintRequest(const AddCheckConstraintRequest& other482) { + checkConstraintCols = other482.checkConstraintCols; } -AddCheckConstraintRequest& AddCheckConstraintRequest::operator=(const AddCheckConstraintRequest& other465) { - checkConstraintCols = other465.checkConstraintCols; +AddCheckConstraintRequest& AddCheckConstraintRequest::operator=(const AddCheckConstraintRequest& other483) { + checkConstraintCols = other483.checkConstraintCols; return *this; } void AddCheckConstraintRequest::printTo(std::ostream& out) const { @@ -11526,14 +12524,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size466; - ::apache::thrift::protocol::TType _etype469; - xfer += iprot->readListBegin(_etype469, _size466); - this->partitions.resize(_size466); - uint32_t _i470; - for (_i470 = 0; _i470 < _size466; ++_i470) + uint32_t _size484; + ::apache::thrift::protocol::TType _etype487; + xfer += iprot->readListBegin(_etype487, _size484); + this->partitions.resize(_size484); + uint32_t _i488; + for (_i488 = 0; _i488 < _size484; ++_i488) { - xfer += this->partitions[_i470].read(iprot); + xfer += this->partitions[_i488].read(iprot); } xfer += iprot->readListEnd(); } @@ -11574,10 +12572,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter471; - for (_iter471 = this->partitions.begin(); _iter471 != this->partitions.end(); ++_iter471) + std::vector ::const_iterator _iter489; + for (_iter489 = this->partitions.begin(); _iter489 != this->partitions.end(); ++_iter489) { - xfer += (*_iter471).write(oprot); + xfer += (*_iter489).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11598,13 +12596,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other472) { - partitions = other472.partitions; - hasUnknownPartitions = other472.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other490) { + partitions = other490.partitions; + hasUnknownPartitions = other490.hasUnknownPartitions; } -PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other473) { - partitions = other473.partitions; - hasUnknownPartitions = other473.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other491) { + partitions = other491.partitions; + hasUnknownPartitions = other491.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -11642,6 +12640,11 
@@ void PartitionsByExprRequest::__set_maxParts(const int16_t val) { __isset.maxParts = true; } +void PartitionsByExprRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -11706,6 +12709,14 @@ uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* ip xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -11751,6 +12762,11 @@ uint32_t PartitionsByExprRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeI16(this->maxParts); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -11763,24 +12779,27 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) { swap(a.expr, b.expr); swap(a.defaultPartitionName, b.defaultPartitionName); swap(a.maxParts, b.maxParts); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other474) { - dbName = other474.dbName; - tblName = other474.tblName; - expr = other474.expr; - defaultPartitionName = other474.defaultPartitionName; - maxParts = other474.maxParts; - __isset = other474.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other475) { - dbName = other475.dbName; - tblName = other475.tblName; - expr = other475.expr; - defaultPartitionName = other475.defaultPartitionName; - maxParts = other475.maxParts; - __isset = other475.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other492) { + dbName = other492.dbName; + tblName = other492.tblName; + expr = other492.expr; + defaultPartitionName = other492.defaultPartitionName; + maxParts = other492.maxParts; + catName = other492.catName; + __isset = other492.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other493) { + dbName = other493.dbName; + tblName = other493.tblName; + expr = other493.expr; + defaultPartitionName = other493.defaultPartitionName; + maxParts = other493.maxParts; + catName = other493.catName; + __isset = other493.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -11791,6 +12810,7 @@ void PartitionsByExprRequest::printTo(std::ostream& out) const { out << ", " << "expr=" << to_string(expr); out << ", " << "defaultPartitionName="; (__isset.defaultPartitionName ? (out << to_string(defaultPartitionName)) : (out << "")); out << ", " << "maxParts="; (__isset.maxParts ? (out << to_string(maxParts)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -11829,14 +12849,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size476; - ::apache::thrift::protocol::TType _etype479; - xfer += iprot->readListBegin(_etype479, _size476); - this->tableStats.resize(_size476); - uint32_t _i480; - for (_i480 = 0; _i480 < _size476; ++_i480) + uint32_t _size494; + ::apache::thrift::protocol::TType _etype497; + xfer += iprot->readListBegin(_etype497, _size494); + this->tableStats.resize(_size494); + uint32_t _i498; + for (_i498 = 0; _i498 < _size494; ++_i498) { - xfer += this->tableStats[_i480].read(iprot); + xfer += this->tableStats[_i498].read(iprot); } xfer += iprot->readListEnd(); } @@ -11867,10 +12887,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter481; - for (_iter481 = this->tableStats.begin(); _iter481 != this->tableStats.end(); ++_iter481) + std::vector ::const_iterator _iter499; + for (_iter499 = this->tableStats.begin(); _iter499 != this->tableStats.end(); ++_iter499) { - xfer += (*_iter481).write(oprot); + xfer += (*_iter499).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11886,11 +12906,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) { swap(a.tableStats, b.tableStats); } -TableStatsResult::TableStatsResult(const TableStatsResult& other482) { - tableStats = other482.tableStats; +TableStatsResult::TableStatsResult(const TableStatsResult& other500) { + tableStats = other500.tableStats; } -TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other483) { - tableStats = other483.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other501) { + tableStats = other501.tableStats; return *this; } void TableStatsResult::printTo(std::ostream& out) const { @@ -11935,26 +12955,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size484; - ::apache::thrift::protocol::TType _ktype485; - ::apache::thrift::protocol::TType _vtype486; - xfer += iprot->readMapBegin(_ktype485, _vtype486, _size484); - uint32_t _i488; - for (_i488 = 0; _i488 < _size484; ++_i488) + uint32_t _size502; + ::apache::thrift::protocol::TType _ktype503; + ::apache::thrift::protocol::TType _vtype504; + xfer += iprot->readMapBegin(_ktype503, _vtype504, _size502); + uint32_t _i506; + for (_i506 = 0; _i506 < _size502; ++_i506) { - std::string _key489; - xfer += iprot->readString(_key489); - std::vector & _val490 = this->partStats[_key489]; + std::string _key507; + xfer += iprot->readString(_key507); + std::vector & _val508 = this->partStats[_key507]; { - _val490.clear(); - uint32_t _size491; - ::apache::thrift::protocol::TType _etype494; - xfer += iprot->readListBegin(_etype494, _size491); - _val490.resize(_size491); - uint32_t _i495; - for (_i495 = 0; _i495 < _size491; ++_i495) + _val508.clear(); + uint32_t _size509; + ::apache::thrift::protocol::TType _etype512; + xfer += iprot->readListBegin(_etype512, _size509); + _val508.resize(_size509); + uint32_t _i513; + for (_i513 = 0; _i513 < _size509; ++_i513) { - xfer += _val490[_i495].read(iprot); + xfer += 
_val508[_i513].read(iprot); } xfer += iprot->readListEnd(); } @@ -11988,16 +13008,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->partStats.size())); - std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter496; - for (_iter496 = this->partStats.begin(); _iter496 != this->partStats.end(); ++_iter496) + std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter514; + for (_iter514 = this->partStats.begin(); _iter514 != this->partStats.end(); ++_iter514) { - xfer += oprot->writeString(_iter496->first); + xfer += oprot->writeString(_iter514->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter496->second.size())); - std::vector<ColumnStatisticsObj> ::const_iterator _iter497; - for (_iter497 = _iter496->second.begin(); _iter497 != _iter496->second.end(); ++_iter497) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter514->second.size())); + std::vector<ColumnStatisticsObj> ::const_iterator _iter515; + for (_iter515 = _iter514->second.begin(); _iter515 != _iter514->second.end(); ++_iter515) { - xfer += (*_iter497).write(oprot); + xfer += (*_iter515).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12016,11 +13036,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { swap(a.partStats, b.partStats); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other498) { - partStats = other498.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other516) { + partStats = other516.partStats; } -PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other499) { - partStats = other499.partStats; +PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other517) { + partStats = other517.partStats; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { @@ -12047,6 +13067,11 @@ void TableStatsRequest::__set_colNames(const std::vector<std::string> & val) { this->colNames = val; } +void TableStatsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12091,14 +13116,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size500; - ::apache::thrift::protocol::TType _etype503; - xfer += iprot->readListBegin(_etype503, _size500); - this->colNames.resize(_size500); - uint32_t _i504; - for (_i504 = 0; _i504 < _size500; ++_i504) + uint32_t _size518; + ::apache::thrift::protocol::TType _etype521; + xfer += iprot->readListBegin(_etype521, _size518); + this->colNames.resize(_size518); + uint32_t _i522; + for (_i522 = 0; _i522 < _size518; ++_i522) { - xfer += iprot->readString(this->colNames[_i504]); + xfer += iprot->readString(this->colNames[_i522]); } xfer += iprot->readListEnd(); } @@ -12107,6 +13132,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer +=
iprot->skip(ftype); break; @@ -12141,15 +13174,20 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter505; - for (_iter505 = this->colNames.begin(); _iter505 != this->colNames.end(); ++_iter505) + std::vector ::const_iterator _iter523; + for (_iter523 = this->colNames.begin(); _iter523 != this->colNames.end(); ++_iter523) { - xfer += oprot->writeString((*_iter505)); + xfer += oprot->writeString((*_iter523)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12160,17 +13198,23 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.dbName, b.dbName); swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -TableStatsRequest::TableStatsRequest(const TableStatsRequest& other506) { - dbName = other506.dbName; - tblName = other506.tblName; - colNames = other506.colNames; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other524) { + dbName = other524.dbName; + tblName = other524.tblName; + colNames = other524.colNames; + catName = other524.catName; + __isset = other524.__isset; } -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other507) { - dbName = other507.dbName; - tblName = other507.tblName; - colNames = other507.colNames; +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other525) { + dbName = other525.dbName; + tblName = other525.tblName; + colNames = other525.colNames; + catName = other525.catName; + __isset = other525.__isset; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -12179,6 +13223,7 @@ void TableStatsRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -12203,6 +13248,11 @@ void PartitionsStatsRequest::__set_partNames(const std::vector & va this->partNames = val; } +void PartitionsStatsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12248,14 +13298,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size508; - ::apache::thrift::protocol::TType _etype511; - xfer += iprot->readListBegin(_etype511, _size508); - this->colNames.resize(_size508); - uint32_t _i512; - for (_i512 = 0; _i512 < _size508; ++_i512) + uint32_t _size526; + ::apache::thrift::protocol::TType _etype529; + xfer += iprot->readListBegin(_etype529, _size526); + this->colNames.resize(_size526); + uint32_t _i530; + for (_i530 = 0; _i530 < _size526; ++_i530) { - xfer += iprot->readString(this->colNames[_i512]); + xfer += iprot->readString(this->colNames[_i530]); } xfer += iprot->readListEnd(); } @@ -12268,14 +13318,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size513; - ::apache::thrift::protocol::TType _etype516; - xfer += iprot->readListBegin(_etype516, _size513); - this->partNames.resize(_size513); - uint32_t _i517; - for (_i517 = 0; _i517 < _size513; ++_i517) + uint32_t _size531; + ::apache::thrift::protocol::TType _etype534; + xfer += iprot->readListBegin(_etype534, _size531); + this->partNames.resize(_size531); + uint32_t _i535; + for (_i535 = 0; _i535 < _size531; ++_i535) { - xfer += iprot->readString(this->partNames[_i517]); + xfer += iprot->readString(this->partNames[_i535]); } xfer += iprot->readListEnd(); } @@ -12284,6 +13334,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -12320,10 +13378,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter518; - for (_iter518 = this->colNames.begin(); _iter518 != this->colNames.end(); ++_iter518) + std::vector ::const_iterator _iter536; + for (_iter536 = this->colNames.begin(); _iter536 != this->colNames.end(); ++_iter536) { - xfer += oprot->writeString((*_iter518)); + xfer += oprot->writeString((*_iter536)); } xfer += oprot->writeListEnd(); } @@ -12332,15 +13390,20 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter519; - for (_iter519 = this->partNames.begin(); _iter519 != this->partNames.end(); ++_iter519) + std::vector ::const_iterator _iter537; + for (_iter537 = this->partNames.begin(); 
_iter537 != this->partNames.end(); ++_iter537) { - xfer += oprot->writeString((*_iter519)); + xfer += oprot->writeString((*_iter537)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12352,19 +13415,25 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); swap(a.partNames, b.partNames); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other520) { - dbName = other520.dbName; - tblName = other520.tblName; - colNames = other520.colNames; - partNames = other520.partNames; -} -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other521) { - dbName = other521.dbName; - tblName = other521.tblName; - colNames = other521.colNames; - partNames = other521.partNames; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other538) { + dbName = other538.dbName; + tblName = other538.tblName; + colNames = other538.colNames; + partNames = other538.partNames; + catName = other538.catName; + __isset = other538.__isset; +} +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other539) { + dbName = other539.dbName; + tblName = other539.tblName; + colNames = other539.colNames; + partNames = other539.partNames; + catName = other539.catName; + __isset = other539.__isset; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -12374,6 +13443,7 @@ void PartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); out << ", " << "partNames=" << to_string(partNames); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -12412,14 +13482,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size522; - ::apache::thrift::protocol::TType _etype525; - xfer += iprot->readListBegin(_etype525, _size522); - this->partitions.resize(_size522); - uint32_t _i526; - for (_i526 = 0; _i526 < _size522; ++_i526) + uint32_t _size540; + ::apache::thrift::protocol::TType _etype543; + xfer += iprot->readListBegin(_etype543, _size540); + this->partitions.resize(_size540); + uint32_t _i544; + for (_i544 = 0; _i544 < _size540; ++_i544) { - xfer += this->partitions[_i526].read(iprot); + xfer += this->partitions[_i544].read(iprot); } xfer += iprot->readListEnd(); } @@ -12449,10 +13519,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter527; - for (_iter527 = this->partitions.begin(); _iter527 != this->partitions.end(); ++_iter527) + std::vector ::const_iterator _iter545; + for (_iter545 = this->partitions.begin(); _iter545 != this->partitions.end(); ++_iter545) { - xfer += (*_iter527).write(oprot); + xfer += (*_iter545).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12469,13 +13539,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) { swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other528) { - partitions = other528.partitions; - __isset = other528.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other546) { + partitions = other546.partitions; + __isset = other546.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other529) { - partitions = other529.partitions; - __isset = other529.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other547) { + partitions = other547.partitions; + __isset = other547.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { @@ -12511,6 +13581,11 @@ void AddPartitionsRequest::__set_needResult(const bool val) { __isset.needResult = true; } +void AddPartitionsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12556,14 +13631,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size530; - ::apache::thrift::protocol::TType _etype533; - xfer += iprot->readListBegin(_etype533, _size530); - this->parts.resize(_size530); - uint32_t _i534; - for (_i534 = 0; _i534 < _size530; ++_i534) + uint32_t _size548; + ::apache::thrift::protocol::TType _etype551; + xfer += iprot->readListBegin(_etype551, _size548); + this->parts.resize(_size548); + uint32_t _i552; + for (_i552 = 0; _i552 < _size548; ++_i552) { - xfer += this->parts[_i534].read(iprot); + xfer += this->parts[_i552].read(iprot); } xfer += iprot->readListEnd(); } @@ -12588,6 +13663,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } 
break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -12624,10 +13707,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter535; - for (_iter535 = this->parts.begin(); _iter535 != this->parts.end(); ++_iter535) + std::vector ::const_iterator _iter553; + for (_iter553 = this->parts.begin(); _iter553 != this->parts.end(); ++_iter553) { - xfer += (*_iter535).write(oprot); + xfer += (*_iter553).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12642,6 +13725,11 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12654,24 +13742,27 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.parts, b.parts); swap(a.ifNotExists, b.ifNotExists); swap(a.needResult, b.needResult); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other536) { - dbName = other536.dbName; - tblName = other536.tblName; - parts = other536.parts; - ifNotExists = other536.ifNotExists; - needResult = other536.needResult; - __isset = other536.__isset; -} -AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other537) { - dbName = other537.dbName; - tblName = other537.tblName; - parts = other537.parts; - ifNotExists = other537.ifNotExists; - needResult = other537.needResult; - __isset = other537.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other554) { + dbName = other554.dbName; + tblName = other554.tblName; + parts = other554.parts; + ifNotExists = other554.ifNotExists; + needResult = other554.needResult; + catName = other554.catName; + __isset = other554.__isset; +} +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other555) { + dbName = other555.dbName; + tblName = other555.tblName; + parts = other555.parts; + ifNotExists = other555.ifNotExists; + needResult = other555.needResult; + catName = other555.catName; + __isset = other555.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -12682,6 +13773,7 @@ void AddPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "parts=" << to_string(parts); out << ", " << "ifNotExists=" << to_string(ifNotExists); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -12720,14 +13812,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size538; - ::apache::thrift::protocol::TType _etype541; - xfer += iprot->readListBegin(_etype541, _size538); - this->partitions.resize(_size538); - uint32_t _i542; - for (_i542 = 0; _i542 < _size538; ++_i542) + uint32_t _size556; + ::apache::thrift::protocol::TType _etype559; + xfer += iprot->readListBegin(_etype559, _size556); + this->partitions.resize(_size556); + uint32_t _i560; + for (_i560 = 0; _i560 < _size556; ++_i560) { - xfer += this->partitions[_i542].read(iprot); + xfer += this->partitions[_i560].read(iprot); } xfer += iprot->readListEnd(); } @@ -12757,10 +13849,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter543; - for (_iter543 = this->partitions.begin(); _iter543 != this->partitions.end(); ++_iter543) + std::vector ::const_iterator _iter561; + for (_iter561 = this->partitions.begin(); _iter561 != this->partitions.end(); ++_iter561) { - xfer += (*_iter543).write(oprot); + xfer += (*_iter561).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12777,13 +13869,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other544) { - partitions = other544.partitions; - __isset = other544.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other562) { + partitions = other562.partitions; + __isset = other562.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other545) { - partitions = other545.partitions; - __isset = other545.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other563) { + partitions = other563.partitions; + __isset = other563.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -12885,15 +13977,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other546) { - expr = other546.expr; - partArchiveLevel = other546.partArchiveLevel; - __isset = other546.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other564) { + expr = other564.expr; + partArchiveLevel = other564.partArchiveLevel; + __isset = other564.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other547) { - expr = other547.expr; - partArchiveLevel = other547.partArchiveLevel; - __isset = other547.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other565) { + expr = other565.expr; + partArchiveLevel = other565.partArchiveLevel; + __isset = other565.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -12942,14 +14034,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size548; - ::apache::thrift::protocol::TType _etype551; - xfer += iprot->readListBegin(_etype551, _size548); - 
this->names.resize(_size548); - uint32_t _i552; - for (_i552 = 0; _i552 < _size548; ++_i552) + uint32_t _size566; + ::apache::thrift::protocol::TType _etype569; + xfer += iprot->readListBegin(_etype569, _size566); + this->names.resize(_size566); + uint32_t _i570; + for (_i570 = 0; _i570 < _size566; ++_i570) { - xfer += iprot->readString(this->names[_i552]); + xfer += iprot->readString(this->names[_i570]); } xfer += iprot->readListEnd(); } @@ -12962,14 +14054,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size553; - ::apache::thrift::protocol::TType _etype556; - xfer += iprot->readListBegin(_etype556, _size553); - this->exprs.resize(_size553); - uint32_t _i557; - for (_i557 = 0; _i557 < _size553; ++_i557) + uint32_t _size571; + ::apache::thrift::protocol::TType _etype574; + xfer += iprot->readListBegin(_etype574, _size571); + this->exprs.resize(_size571); + uint32_t _i575; + for (_i575 = 0; _i575 < _size571; ++_i575) { - xfer += this->exprs[_i557].read(iprot); + xfer += this->exprs[_i575].read(iprot); } xfer += iprot->readListEnd(); } @@ -12998,10 +14090,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter558; - for (_iter558 = this->names.begin(); _iter558 != this->names.end(); ++_iter558) + std::vector ::const_iterator _iter576; + for (_iter576 = this->names.begin(); _iter576 != this->names.end(); ++_iter576) { - xfer += oprot->writeString((*_iter558)); + xfer += oprot->writeString((*_iter576)); } xfer += oprot->writeListEnd(); } @@ -13010,10 +14102,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter559; - for (_iter559 = this->exprs.begin(); _iter559 != this->exprs.end(); ++_iter559) + std::vector ::const_iterator _iter577; + for (_iter577 = this->exprs.begin(); _iter577 != this->exprs.end(); ++_iter577) { - xfer += (*_iter559).write(oprot); + xfer += (*_iter577).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13031,15 +14123,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other560) { - names = other560.names; - exprs = other560.exprs; - __isset = other560.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other578) { + names = other578.names; + exprs = other578.exprs; + __isset = other578.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other561) { - names = other561.names; - exprs = other561.exprs; - __isset = other561.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other579) { + names = other579.names; + exprs = other579.exprs; + __isset = other579.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -13092,6 +14184,11 @@ void DropPartitionsRequest::__set_needResult(const bool val) { __isset.needResult = true; } +void DropPartitionsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t 
DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13180,6 +14277,14 @@ uint32_t DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13240,6 +14345,11 @@ uint32_t DropPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13255,30 +14365,33 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.ignoreProtection, b.ignoreProtection); swap(a.environmentContext, b.environmentContext); swap(a.needResult, b.needResult); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other562) { - dbName = other562.dbName; - tblName = other562.tblName; - parts = other562.parts; - deleteData = other562.deleteData; - ifExists = other562.ifExists; - ignoreProtection = other562.ignoreProtection; - environmentContext = other562.environmentContext; - needResult = other562.needResult; - __isset = other562.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other563) { - dbName = other563.dbName; - tblName = other563.tblName; - parts = other563.parts; - deleteData = other563.deleteData; - ifExists = other563.ifExists; - ignoreProtection = other563.ignoreProtection; - environmentContext = other563.environmentContext; - needResult = other563.needResult; - __isset = other563.__isset; +DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other580) { + dbName = other580.dbName; + tblName = other580.tblName; + parts = other580.parts; + deleteData = other580.deleteData; + ifExists = other580.ifExists; + ignoreProtection = other580.ignoreProtection; + environmentContext = other580.environmentContext; + needResult = other580.needResult; + catName = other580.catName; + __isset = other580.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other581) { + dbName = other581.dbName; + tblName = other581.tblName; + parts = other581.parts; + deleteData = other581.deleteData; + ifExists = other581.ifExists; + ignoreProtection = other581.ignoreProtection; + environmentContext = other581.environmentContext; + needResult = other581.needResult; + catName = other581.catName; + __isset = other581.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -13292,6 +14405,7 @@ void DropPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "ignoreProtection="; (__isset.ignoreProtection ? (out << to_string(ignoreProtection)) : (out << "")); out << ", " << "environmentContext="; (__isset.environmentContext ? (out << to_string(environmentContext)) : (out << "")); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -13337,6 +14451,11 @@ void PartitionValuesRequest::__set_maxParts(const int64_t val) { __isset.maxParts = true; } +void PartitionValuesRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13381,14 +14500,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size564; - ::apache::thrift::protocol::TType _etype567; - xfer += iprot->readListBegin(_etype567, _size564); - this->partitionKeys.resize(_size564); - uint32_t _i568; - for (_i568 = 0; _i568 < _size564; ++_i568) + uint32_t _size582; + ::apache::thrift::protocol::TType _etype585; + xfer += iprot->readListBegin(_etype585, _size582); + this->partitionKeys.resize(_size582); + uint32_t _i586; + for (_i586 = 0; _i586 < _size582; ++_i586) { - xfer += this->partitionKeys[_i568].read(iprot); + xfer += this->partitionKeys[_i586].read(iprot); } xfer += iprot->readListEnd(); } @@ -13417,14 +14536,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionOrder.clear(); - uint32_t _size569; - ::apache::thrift::protocol::TType _etype572; - xfer += iprot->readListBegin(_etype572, _size569); - this->partitionOrder.resize(_size569); - uint32_t _i573; - for (_i573 = 0; _i573 < _size569; ++_i573) + uint32_t _size587; + ::apache::thrift::protocol::TType _etype590; + xfer += iprot->readListBegin(_etype590, _size587); + this->partitionOrder.resize(_size587); + uint32_t _i591; + for (_i591 = 0; _i591 < _size587; ++_i591) { - xfer += this->partitionOrder[_i573].read(iprot); + xfer += this->partitionOrder[_i591].read(iprot); } xfer += iprot->readListEnd(); } @@ -13449,6 +14568,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13483,10 +14610,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter574; - for (_iter574 = this->partitionKeys.begin(); _iter574 != this->partitionKeys.end(); ++_iter574) + std::vector ::const_iterator _iter592; + for (_iter592 = this->partitionKeys.begin(); _iter592 != this->partitionKeys.end(); ++_iter592) { - xfer += (*_iter574).write(oprot); + xfer += (*_iter592).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13506,10 +14633,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionOrder", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionOrder.size())); - std::vector ::const_iterator _iter575; - for (_iter575 = this->partitionOrder.begin(); _iter575 != this->partitionOrder.end(); ++_iter575) + std::vector 
::const_iterator _iter593; + for (_iter593 = this->partitionOrder.begin(); _iter593 != this->partitionOrder.end(); ++_iter593) { - xfer += (*_iter575).write(oprot); + xfer += (*_iter593).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13525,6 +14652,11 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeI64(this->maxParts); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13540,30 +14672,33 @@ void swap(PartitionValuesRequest &a, PartitionValuesRequest &b) { swap(a.partitionOrder, b.partitionOrder); swap(a.ascending, b.ascending); swap(a.maxParts, b.maxParts); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other576) { - dbName = other576.dbName; - tblName = other576.tblName; - partitionKeys = other576.partitionKeys; - applyDistinct = other576.applyDistinct; - filter = other576.filter; - partitionOrder = other576.partitionOrder; - ascending = other576.ascending; - maxParts = other576.maxParts; - __isset = other576.__isset; -} -PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other577) { - dbName = other577.dbName; - tblName = other577.tblName; - partitionKeys = other577.partitionKeys; - applyDistinct = other577.applyDistinct; - filter = other577.filter; - partitionOrder = other577.partitionOrder; - ascending = other577.ascending; - maxParts = other577.maxParts; - __isset = other577.__isset; +PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other594) { + dbName = other594.dbName; + tblName = other594.tblName; + partitionKeys = other594.partitionKeys; + applyDistinct = other594.applyDistinct; + filter = other594.filter; + partitionOrder = other594.partitionOrder; + ascending = other594.ascending; + maxParts = other594.maxParts; + catName = other594.catName; + __isset = other594.__isset; +} +PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other595) { + dbName = other595.dbName; + tblName = other595.tblName; + partitionKeys = other595.partitionKeys; + applyDistinct = other595.applyDistinct; + filter = other595.filter; + partitionOrder = other595.partitionOrder; + ascending = other595.ascending; + maxParts = other595.maxParts; + catName = other595.catName; + __isset = other595.__isset; return *this; } void PartitionValuesRequest::printTo(std::ostream& out) const { @@ -13577,6 +14712,7 @@ void PartitionValuesRequest::printTo(std::ostream& out) const { out << ", " << "partitionOrder="; (__isset.partitionOrder ? (out << to_string(partitionOrder)) : (out << "")); out << ", " << "ascending="; (__isset.ascending ? (out << to_string(ascending)) : (out << "")); out << ", " << "maxParts="; (__isset.maxParts ? (out << to_string(maxParts)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -13615,14 +14751,14 @@ uint32_t PartitionValuesRow::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->row.clear(); - uint32_t _size578; - ::apache::thrift::protocol::TType _etype581; - xfer += iprot->readListBegin(_etype581, _size578); - this->row.resize(_size578); - uint32_t _i582; - for (_i582 = 0; _i582 < _size578; ++_i582) + uint32_t _size596; + ::apache::thrift::protocol::TType _etype599; + xfer += iprot->readListBegin(_etype599, _size596); + this->row.resize(_size596); + uint32_t _i600; + for (_i600 = 0; _i600 < _size596; ++_i600) { - xfer += iprot->readString(this->row[_i582]); + xfer += iprot->readString(this->row[_i600]); } xfer += iprot->readListEnd(); } @@ -13653,10 +14789,10 @@ uint32_t PartitionValuesRow::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("row", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->row.size())); - std::vector ::const_iterator _iter583; - for (_iter583 = this->row.begin(); _iter583 != this->row.end(); ++_iter583) + std::vector ::const_iterator _iter601; + for (_iter601 = this->row.begin(); _iter601 != this->row.end(); ++_iter601) { - xfer += oprot->writeString((*_iter583)); + xfer += oprot->writeString((*_iter601)); } xfer += oprot->writeListEnd(); } @@ -13672,11 +14808,11 @@ void swap(PartitionValuesRow &a, PartitionValuesRow &b) { swap(a.row, b.row); } -PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other584) { - row = other584.row; +PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other602) { + row = other602.row; } -PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other585) { - row = other585.row; +PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other603) { + row = other603.row; return *this; } void PartitionValuesRow::printTo(std::ostream& out) const { @@ -13721,14 +14857,14 @@ uint32_t PartitionValuesResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionValues.clear(); - uint32_t _size586; - ::apache::thrift::protocol::TType _etype589; - xfer += iprot->readListBegin(_etype589, _size586); - this->partitionValues.resize(_size586); - uint32_t _i590; - for (_i590 = 0; _i590 < _size586; ++_i590) + uint32_t _size604; + ::apache::thrift::protocol::TType _etype607; + xfer += iprot->readListBegin(_etype607, _size604); + this->partitionValues.resize(_size604); + uint32_t _i608; + for (_i608 = 0; _i608 < _size604; ++_i608) { - xfer += this->partitionValues[_i590].read(iprot); + xfer += this->partitionValues[_i608].read(iprot); } xfer += iprot->readListEnd(); } @@ -13759,10 +14895,10 @@ uint32_t PartitionValuesResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("partitionValues", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionValues.size())); - std::vector ::const_iterator _iter591; - for (_iter591 = this->partitionValues.begin(); _iter591 != this->partitionValues.end(); ++_iter591) + std::vector ::const_iterator _iter609; + for (_iter609 = this->partitionValues.begin(); _iter609 != this->partitionValues.end(); ++_iter609) { - xfer += (*_iter591).write(oprot); + xfer += (*_iter609).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13778,11 
+14914,11 @@ void swap(PartitionValuesResponse &a, PartitionValuesResponse &b) { swap(a.partitionValues, b.partitionValues); } -PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other592) { - partitionValues = other592.partitionValues; +PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other610) { + partitionValues = other610.partitionValues; } -PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other593) { - partitionValues = other593.partitionValues; +PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other611) { + partitionValues = other611.partitionValues; return *this; } void PartitionValuesResponse::printTo(std::ostream& out) const { @@ -13828,9 +14964,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast594; - xfer += iprot->readI32(ecast594); - this->resourceType = (ResourceType::type)ecast594; + int32_t ecast612; + xfer += iprot->readI32(ecast612); + this->resourceType = (ResourceType::type)ecast612; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -13881,15 +15017,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other595) { - resourceType = other595.resourceType; - uri = other595.uri; - __isset = other595.__isset; +ResourceUri::ResourceUri(const ResourceUri& other613) { + resourceType = other613.resourceType; + uri = other613.uri; + __isset = other613.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other596) { - resourceType = other596.resourceType; - uri = other596.uri; - __isset = other596.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other614) { + resourceType = other614.resourceType; + uri = other614.uri; + __isset = other614.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -13937,6 +15073,11 @@ void Function::__set_resourceUris(const std::vector & val) { this->resourceUris = val; } +void Function::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13992,9 +15133,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast597; - xfer += iprot->readI32(ecast597); - this->ownerType = (PrincipalType::type)ecast597; + int32_t ecast615; + xfer += iprot->readI32(ecast615); + this->ownerType = (PrincipalType::type)ecast615; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -14010,9 +15151,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast598; - xfer += iprot->readI32(ecast598); - this->functionType = (FunctionType::type)ecast598; + int32_t ecast616; + xfer += iprot->readI32(ecast616); + this->functionType = (FunctionType::type)ecast616; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -14022,14 +15163,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size599; - ::apache::thrift::protocol::TType _etype602; - xfer += 
iprot->readListBegin(_etype602, _size599); - this->resourceUris.resize(_size599); - uint32_t _i603; - for (_i603 = 0; _i603 < _size599; ++_i603) + uint32_t _size617; + ::apache::thrift::protocol::TType _etype620; + xfer += iprot->readListBegin(_etype620, _size617); + this->resourceUris.resize(_size617); + uint32_t _i621; + for (_i621 = 0; _i621 < _size617; ++_i621) { - xfer += this->resourceUris[_i603].read(iprot); + xfer += this->resourceUris[_i621].read(iprot); } xfer += iprot->readListEnd(); } @@ -14038,6 +15179,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -14086,15 +15235,20 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); - std::vector ::const_iterator _iter604; - for (_iter604 = this->resourceUris.begin(); _iter604 != this->resourceUris.end(); ++_iter604) + std::vector ::const_iterator _iter622; + for (_iter622 = this->resourceUris.begin(); _iter622 != this->resourceUris.end(); ++_iter622) { - xfer += (*_iter604).write(oprot); + xfer += (*_iter622).write(oprot); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -14110,30 +15264,33 @@ void swap(Function &a, Function &b) { swap(a.createTime, b.createTime); swap(a.functionType, b.functionType); swap(a.resourceUris, b.resourceUris); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Function::Function(const Function& other605) { - functionName = other605.functionName; - dbName = other605.dbName; - className = other605.className; - ownerName = other605.ownerName; - ownerType = other605.ownerType; - createTime = other605.createTime; - functionType = other605.functionType; - resourceUris = other605.resourceUris; - __isset = other605.__isset; -} -Function& Function::operator=(const Function& other606) { - functionName = other606.functionName; - dbName = other606.dbName; - className = other606.className; - ownerName = other606.ownerName; - ownerType = other606.ownerType; - createTime = other606.createTime; - functionType = other606.functionType; - resourceUris = other606.resourceUris; - __isset = other606.__isset; +Function::Function(const Function& other623) { + functionName = other623.functionName; + dbName = other623.dbName; + className = other623.className; + ownerName = other623.ownerName; + ownerType = other623.ownerType; + createTime = other623.createTime; + functionType = other623.functionType; + resourceUris = other623.resourceUris; + catName = other623.catName; + __isset = other623.__isset; +} +Function& Function::operator=(const Function& other624) { + functionName = other624.functionName; + dbName = other624.dbName; + className = other624.className; + ownerName = other624.ownerName; + ownerType = other624.ownerType; + createTime = other624.createTime; + functionType = other624.functionType; + 
resourceUris = other624.resourceUris; + catName = other624.catName; + __isset = other624.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -14147,6 +15304,7 @@ void Function::printTo(std::ostream& out) const { out << ", " << "createTime=" << to_string(createTime); out << ", " << "functionType=" << to_string(functionType); out << ", " << "resourceUris=" << to_string(resourceUris); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -14231,9 +15389,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast607; - xfer += iprot->readI32(ecast607); - this->state = (TxnState::type)ecast607; + int32_t ecast625; + xfer += iprot->readI32(ecast625); + this->state = (TxnState::type)ecast625; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -14380,29 +15538,29 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other608) { - id = other608.id; - state = other608.state; - user = other608.user; - hostname = other608.hostname; - agentInfo = other608.agentInfo; - heartbeatCount = other608.heartbeatCount; - metaInfo = other608.metaInfo; - startedTime = other608.startedTime; - lastHeartbeatTime = other608.lastHeartbeatTime; - __isset = other608.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other609) { - id = other609.id; - state = other609.state; - user = other609.user; - hostname = other609.hostname; - agentInfo = other609.agentInfo; - heartbeatCount = other609.heartbeatCount; - metaInfo = other609.metaInfo; - startedTime = other609.startedTime; - lastHeartbeatTime = other609.lastHeartbeatTime; - __isset = other609.__isset; +TxnInfo::TxnInfo(const TxnInfo& other626) { + id = other626.id; + state = other626.state; + user = other626.user; + hostname = other626.hostname; + agentInfo = other626.agentInfo; + heartbeatCount = other626.heartbeatCount; + metaInfo = other626.metaInfo; + startedTime = other626.startedTime; + lastHeartbeatTime = other626.lastHeartbeatTime; + __isset = other626.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other627) { + id = other627.id; + state = other627.state; + user = other627.user; + hostname = other627.hostname; + agentInfo = other627.agentInfo; + heartbeatCount = other627.heartbeatCount; + metaInfo = other627.metaInfo; + startedTime = other627.startedTime; + lastHeartbeatTime = other627.lastHeartbeatTime; + __isset = other627.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -14468,14 +15626,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size610; - ::apache::thrift::protocol::TType _etype613; - xfer += iprot->readListBegin(_etype613, _size610); - this->open_txns.resize(_size610); - uint32_t _i614; - for (_i614 = 0; _i614 < _size610; ++_i614) + uint32_t _size628; + ::apache::thrift::protocol::TType _etype631; + xfer += iprot->readListBegin(_etype631, _size628); + this->open_txns.resize(_size628); + uint32_t _i632; + for (_i632 = 0; _i632 < _size628; ++_i632) { - xfer += this->open_txns[_i614].read(iprot); + xfer += this->open_txns[_i632].read(iprot); } xfer += iprot->readListEnd(); } @@ -14512,10 +15670,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 
2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter615; - for (_iter615 = this->open_txns.begin(); _iter615 != this->open_txns.end(); ++_iter615) + std::vector ::const_iterator _iter633; + for (_iter633 = this->open_txns.begin(); _iter633 != this->open_txns.end(); ++_iter633) { - xfer += (*_iter615).write(oprot); + xfer += (*_iter633).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14532,13 +15690,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other616) { - txn_high_water_mark = other616.txn_high_water_mark; - open_txns = other616.open_txns; +GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other634) { + txn_high_water_mark = other634.txn_high_water_mark; + open_txns = other634.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other617) { - txn_high_water_mark = other617.txn_high_water_mark; - open_txns = other617.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other635) { + txn_high_water_mark = other635.txn_high_water_mark; + open_txns = other635.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -14607,14 +15765,14 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size618; - ::apache::thrift::protocol::TType _etype621; - xfer += iprot->readListBegin(_etype621, _size618); - this->open_txns.resize(_size618); - uint32_t _i622; - for (_i622 = 0; _i622 < _size618; ++_i622) + uint32_t _size636; + ::apache::thrift::protocol::TType _etype639; + xfer += iprot->readListBegin(_etype639, _size636); + this->open_txns.resize(_size636); + uint32_t _i640; + for (_i640 = 0; _i640 < _size636; ++_i640) { - xfer += iprot->readI64(this->open_txns[_i622]); + xfer += iprot->readI64(this->open_txns[_i640]); } xfer += iprot->readListEnd(); } @@ -14669,10 +15827,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter623; - for (_iter623 = this->open_txns.begin(); _iter623 != this->open_txns.end(); ++_iter623) + std::vector ::const_iterator _iter641; + for (_iter641 = this->open_txns.begin(); _iter641 != this->open_txns.end(); ++_iter641) { - xfer += oprot->writeI64((*_iter623)); + xfer += oprot->writeI64((*_iter641)); } xfer += oprot->writeListEnd(); } @@ -14701,19 +15859,19 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other624) { - txn_high_water_mark = other624.txn_high_water_mark; - open_txns = other624.open_txns; - min_open_txn = other624.min_open_txn; - abortedBits = other624.abortedBits; - __isset = other624.__isset; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other642) { + txn_high_water_mark = other642.txn_high_water_mark; + open_txns = other642.open_txns; + min_open_txn = other642.min_open_txn; + abortedBits = other642.abortedBits; + __isset = other642.__isset; } 
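The hunks above and below repeat one pattern: every request/response struct that gains catName also gains an __isset bit, a __set_catName setter, a guarded branch in write(), and catName/__isset legs in swap(), the copy constructor, and operator=. The standalone sketch below is not part of the diff; the struct and field names are illustrative and only the pattern is taken from the generated code. It shows how the __isset guard keeps the new optional field off the wire until a caller sets it.

// Standalone sketch (hypothetical names; models the pattern the generated
// Thrift C++ uses for the new optional catName field, not the real structs).
#include <iostream>
#include <string>

struct _RequestIsset {
  _RequestIsset() : catName(false) {}
  bool catName : 1;   // one bit per optional field, as thrift generates
};

struct Request {
  std::string dbName;   // always serialized
  std::string catName;  // optional: only serialized when __isset.catName
  _RequestIsset __isset;

  // mirrors the generated __set_catName: assign, then flip the isset bit
  void __set_catName(const std::string& val) {
    catName = val;
    __isset.catName = true;
  }

  // mirrors the write() guard: an optional field is emitted only when set,
  // so pre-catalog clients never put the new field id on the wire and the
  // server can fall back to the default catalog when it is absent
  void write(std::ostream& out) const {
    out << "dbName=" << dbName;
    if (__isset.catName) {
      out << " catName=" << catName;
    }
    out << '\n';
  }
};

int main() {
  Request r;
  r.dbName = "db1";
  r.write(std::cout);       // catName omitted: __isset.catName is false
  r.__set_catName("hive");  // setter flips the bit
  r.write(std::cout);       // now catName is serialized
  return 0;
}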
-GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other625) { - txn_high_water_mark = other625.txn_high_water_mark; - open_txns = other625.open_txns; - min_open_txn = other625.min_open_txn; - abortedBits = other625.abortedBits; - __isset = other625.__isset; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other643) { + txn_high_water_mark = other643.txn_high_water_mark; + open_txns = other643.open_txns; + min_open_txn = other643.min_open_txn; + abortedBits = other643.abortedBits; + __isset = other643.__isset; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -14858,19 +16016,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other626) { - num_txns = other626.num_txns; - user = other626.user; - hostname = other626.hostname; - agentInfo = other626.agentInfo; - __isset = other626.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other644) { + num_txns = other644.num_txns; + user = other644.user; + hostname = other644.hostname; + agentInfo = other644.agentInfo; + __isset = other644.__isset; } -OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other627) { - num_txns = other627.num_txns; - user = other627.user; - hostname = other627.hostname; - agentInfo = other627.agentInfo; - __isset = other627.__isset; +OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other645) { + num_txns = other645.num_txns; + user = other645.user; + hostname = other645.hostname; + agentInfo = other645.agentInfo; + __isset = other645.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -14918,14 +16076,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size628; - ::apache::thrift::protocol::TType _etype631; - xfer += iprot->readListBegin(_etype631, _size628); - this->txn_ids.resize(_size628); - uint32_t _i632; - for (_i632 = 0; _i632 < _size628; ++_i632) + uint32_t _size646; + ::apache::thrift::protocol::TType _etype649; + xfer += iprot->readListBegin(_etype649, _size646); + this->txn_ids.resize(_size646); + uint32_t _i650; + for (_i650 = 0; _i650 < _size646; ++_i650) { - xfer += iprot->readI64(this->txn_ids[_i632]); + xfer += iprot->readI64(this->txn_ids[_i650]); } xfer += iprot->readListEnd(); } @@ -14956,10 +16114,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter633; - for (_iter633 = this->txn_ids.begin(); _iter633 != this->txn_ids.end(); ++_iter633) + std::vector ::const_iterator _iter651; + for (_iter651 = this->txn_ids.begin(); _iter651 != this->txn_ids.end(); ++_iter651) { - xfer += oprot->writeI64((*_iter633)); + xfer += oprot->writeI64((*_iter651)); } xfer += oprot->writeListEnd(); } @@ -14975,11 +16133,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other634) { - txn_ids = other634.txn_ids; +OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other652) { + txn_ids = other652.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other635) { - txn_ids = 
other635.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other653) { + txn_ids = other653.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -15061,11 +16219,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other636) { - txnid = other636.txnid; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other654) { + txnid = other654.txnid; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other637) { - txnid = other637.txnid; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other655) { + txnid = other655.txnid; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -15110,14 +16268,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size638; - ::apache::thrift::protocol::TType _etype641; - xfer += iprot->readListBegin(_etype641, _size638); - this->txn_ids.resize(_size638); - uint32_t _i642; - for (_i642 = 0; _i642 < _size638; ++_i642) + uint32_t _size656; + ::apache::thrift::protocol::TType _etype659; + xfer += iprot->readListBegin(_etype659, _size656); + this->txn_ids.resize(_size656); + uint32_t _i660; + for (_i660 = 0; _i660 < _size656; ++_i660) { - xfer += iprot->readI64(this->txn_ids[_i642]); + xfer += iprot->readI64(this->txn_ids[_i660]); } xfer += iprot->readListEnd(); } @@ -15148,10 +16306,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter643; - for (_iter643 = this->txn_ids.begin(); _iter643 != this->txn_ids.end(); ++_iter643) + std::vector ::const_iterator _iter661; + for (_iter661 = this->txn_ids.begin(); _iter661 != this->txn_ids.end(); ++_iter661) { - xfer += oprot->writeI64((*_iter643)); + xfer += oprot->writeI64((*_iter661)); } xfer += oprot->writeListEnd(); } @@ -15167,11 +16325,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { swap(a.txn_ids, b.txn_ids); } -AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other644) { - txn_ids = other644.txn_ids; +AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other662) { + txn_ids = other662.txn_ids; } -AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other645) { - txn_ids = other645.txn_ids; +AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other663) { + txn_ids = other663.txn_ids; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { @@ -15253,11 +16411,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) { swap(a.txnid, b.txnid); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other646) { - txnid = other646.txnid; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other664) { + txnid = other664.txnid; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other647) { - txnid = other647.txnid; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other665) { + txnid = other665.txnid; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -15307,14 +16465,14 @@ uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* ip if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->fullTableNames.clear(); - uint32_t _size648; - ::apache::thrift::protocol::TType _etype651; - xfer += iprot->readListBegin(_etype651, _size648); - this->fullTableNames.resize(_size648); - uint32_t _i652; - for (_i652 = 0; _i652 < _size648; ++_i652) + uint32_t _size666; + ::apache::thrift::protocol::TType _etype669; + xfer += iprot->readListBegin(_etype669, _size666); + this->fullTableNames.resize(_size666); + uint32_t _i670; + for (_i670 = 0; _i670 < _size666; ++_i670) { - xfer += iprot->readString(this->fullTableNames[_i652]); + xfer += iprot->readString(this->fullTableNames[_i670]); } xfer += iprot->readListEnd(); } @@ -15355,10 +16513,10 @@ uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("fullTableNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->fullTableNames.size())); - std::vector ::const_iterator _iter653; - for (_iter653 = this->fullTableNames.begin(); _iter653 != this->fullTableNames.end(); ++_iter653) + std::vector ::const_iterator _iter671; + for (_iter671 = this->fullTableNames.begin(); _iter671 != this->fullTableNames.end(); ++_iter671) { - xfer += oprot->writeString((*_iter653)); + xfer += oprot->writeString((*_iter671)); } xfer += oprot->writeListEnd(); } @@ -15379,13 +16537,13 @@ void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) { swap(a.validTxnList, b.validTxnList); } -GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other654) { - fullTableNames = other654.fullTableNames; - validTxnList = other654.validTxnList; +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other672) { + fullTableNames = other672.fullTableNames; + validTxnList = other672.validTxnList; } -GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other655) { - fullTableNames = other655.fullTableNames; - validTxnList = other655.validTxnList; +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other673) { + fullTableNames = other673.fullTableNames; + validTxnList = other673.validTxnList; return *this; } void GetValidWriteIdsRequest::printTo(std::ostream& out) const { @@ -15467,14 +16625,14 @@ uint32_t TableValidWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->invalidWriteIds.clear(); - uint32_t _size656; - ::apache::thrift::protocol::TType _etype659; - xfer += iprot->readListBegin(_etype659, _size656); - this->invalidWriteIds.resize(_size656); - uint32_t _i660; - for (_i660 = 0; _i660 < _size656; ++_i660) + uint32_t _size674; + ::apache::thrift::protocol::TType _etype677; + xfer += iprot->readListBegin(_etype677, _size674); + this->invalidWriteIds.resize(_size674); + uint32_t _i678; + for (_i678 = 0; _i678 < _size674; ++_i678) { - xfer += iprot->readI64(this->invalidWriteIds[_i660]); + xfer += iprot->readI64(this->invalidWriteIds[_i678]); } xfer += iprot->readListEnd(); } @@ -15535,10 +16693,10 @@ uint32_t TableValidWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("invalidWriteIds", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->invalidWriteIds.size())); - std::vector ::const_iterator _iter661; - for (_iter661 = this->invalidWriteIds.begin(); _iter661 != 
this->invalidWriteIds.end(); ++_iter661) + std::vector ::const_iterator _iter679; + for (_iter679 = this->invalidWriteIds.begin(); _iter679 != this->invalidWriteIds.end(); ++_iter679) { - xfer += oprot->writeI64((*_iter661)); + xfer += oprot->writeI64((*_iter679)); } xfer += oprot->writeListEnd(); } @@ -15568,21 +16726,21 @@ void swap(TableValidWriteIds &a, TableValidWriteIds &b) { swap(a.__isset, b.__isset); } -TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other662) { - fullTableName = other662.fullTableName; - writeIdHighWaterMark = other662.writeIdHighWaterMark; - invalidWriteIds = other662.invalidWriteIds; - minOpenWriteId = other662.minOpenWriteId; - abortedBits = other662.abortedBits; - __isset = other662.__isset; -} -TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other663) { - fullTableName = other663.fullTableName; - writeIdHighWaterMark = other663.writeIdHighWaterMark; - invalidWriteIds = other663.invalidWriteIds; - minOpenWriteId = other663.minOpenWriteId; - abortedBits = other663.abortedBits; - __isset = other663.__isset; +TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other680) { + fullTableName = other680.fullTableName; + writeIdHighWaterMark = other680.writeIdHighWaterMark; + invalidWriteIds = other680.invalidWriteIds; + minOpenWriteId = other680.minOpenWriteId; + abortedBits = other680.abortedBits; + __isset = other680.__isset; +} +TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other681) { + fullTableName = other681.fullTableName; + writeIdHighWaterMark = other681.writeIdHighWaterMark; + invalidWriteIds = other681.invalidWriteIds; + minOpenWriteId = other681.minOpenWriteId; + abortedBits = other681.abortedBits; + __isset = other681.__isset; return *this; } void TableValidWriteIds::printTo(std::ostream& out) const { @@ -15631,14 +16789,14 @@ uint32_t GetValidWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblValidWriteIds.clear(); - uint32_t _size664; - ::apache::thrift::protocol::TType _etype667; - xfer += iprot->readListBegin(_etype667, _size664); - this->tblValidWriteIds.resize(_size664); - uint32_t _i668; - for (_i668 = 0; _i668 < _size664; ++_i668) + uint32_t _size682; + ::apache::thrift::protocol::TType _etype685; + xfer += iprot->readListBegin(_etype685, _size682); + this->tblValidWriteIds.resize(_size682); + uint32_t _i686; + for (_i686 = 0; _i686 < _size682; ++_i686) { - xfer += this->tblValidWriteIds[_i668].read(iprot); + xfer += this->tblValidWriteIds[_i686].read(iprot); } xfer += iprot->readListEnd(); } @@ -15669,10 +16827,10 @@ uint32_t GetValidWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("tblValidWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tblValidWriteIds.size())); - std::vector ::const_iterator _iter669; - for (_iter669 = this->tblValidWriteIds.begin(); _iter669 != this->tblValidWriteIds.end(); ++_iter669) + std::vector ::const_iterator _iter687; + for (_iter687 = this->tblValidWriteIds.begin(); _iter687 != this->tblValidWriteIds.end(); ++_iter687) { - xfer += (*_iter669).write(oprot); + xfer += (*_iter687).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15688,11 +16846,11 @@ void swap(GetValidWriteIdsResponse &a, GetValidWriteIdsResponse &b) { swap(a.tblValidWriteIds, b.tblValidWriteIds); } 
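
The hunks above renumber the temporaries Thrift generates for container (de)serialization (_size648 becomes _size666, _iter661 becomes _iter679, and so on); the pattern itself is unchanged: read the list header, resize the target vector, then fill it in place. A minimal compilable sketch of that pattern for an i64 list such as txn_ids or invalidWriteIds, assuming the Apache Thrift C++ headers are on the include path; readTxnIds is an illustrative name, not part of the generated API:

    #include <thrift/protocol/TProtocol.h>
    #include <cstdint>
    #include <vector>

    // Sketch of the generated list-read pattern (illustrative only).
    uint32_t readTxnIds(::apache::thrift::protocol::TProtocol* iprot,
                        std::vector<int64_t>& txn_ids) {
      uint32_t xfer = 0;
      txn_ids.clear();
      uint32_t size;
      ::apache::thrift::protocol::TType etype;    // element-type tag from the wire
      xfer += iprot->readListBegin(etype, size);  // list header: element type + length
      txn_ids.resize(size);                       // size known up front, fill in place
      for (uint32_t i = 0; i < size; ++i) {
        xfer += iprot->readI64(txn_ids[i]);
      }
      xfer += iprot->readListEnd();
      return xfer;                                // bytes consumed, like the generated read()
    }

The write side mirrors this with writeListBegin/writeI64/writeListEnd, as the surrounding hunks show.
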
-GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other670) { - tblValidWriteIds = other670.tblValidWriteIds; +GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other688) { + tblValidWriteIds = other688.tblValidWriteIds; } -GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other671) { - tblValidWriteIds = other671.tblValidWriteIds; +GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other689) { + tblValidWriteIds = other689.tblValidWriteIds; return *this; } void GetValidWriteIdsResponse::printTo(std::ostream& out) const { @@ -15747,14 +16905,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnIds.clear(); - uint32_t _size672; - ::apache::thrift::protocol::TType _etype675; - xfer += iprot->readListBegin(_etype675, _size672); - this->txnIds.resize(_size672); - uint32_t _i676; - for (_i676 = 0; _i676 < _size672; ++_i676) + uint32_t _size690; + ::apache::thrift::protocol::TType _etype693; + xfer += iprot->readListBegin(_etype693, _size690); + this->txnIds.resize(_size690); + uint32_t _i694; + for (_i694 = 0; _i694 < _size690; ++_i694) { - xfer += iprot->readI64(this->txnIds[_i676]); + xfer += iprot->readI64(this->txnIds[_i694]); } xfer += iprot->readListEnd(); } @@ -15805,10 +16963,10 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); - std::vector ::const_iterator _iter677; - for (_iter677 = this->txnIds.begin(); _iter677 != this->txnIds.end(); ++_iter677) + std::vector ::const_iterator _iter695; + for (_iter695 = this->txnIds.begin(); _iter695 != this->txnIds.end(); ++_iter695) { - xfer += oprot->writeI64((*_iter677)); + xfer += oprot->writeI64((*_iter695)); } xfer += oprot->writeListEnd(); } @@ -15834,15 +16992,15 @@ void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.tableName, b.tableName); } -AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other678) { - txnIds = other678.txnIds; - dbName = other678.dbName; - tableName = other678.tableName; +AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other696) { + txnIds = other696.txnIds; + dbName = other696.dbName; + tableName = other696.tableName; } -AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other679) { - txnIds = other679.txnIds; - dbName = other679.dbName; - tableName = other679.tableName; +AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other697) { + txnIds = other697.txnIds; + dbName = other697.dbName; + tableName = other697.tableName; return *this; } void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { @@ -15946,13 +17104,13 @@ void swap(TxnToWriteId &a, TxnToWriteId &b) { swap(a.writeId, b.writeId); } -TxnToWriteId::TxnToWriteId(const TxnToWriteId& other680) { - txnId = other680.txnId; - writeId = other680.writeId; +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other698) { + txnId = other698.txnId; + writeId = other698.writeId; } -TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other681) { - txnId = 
other681.txnId; - writeId = other681.writeId; +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other699) { + txnId = other699.txnId; + writeId = other699.writeId; return *this; } void TxnToWriteId::printTo(std::ostream& out) const { @@ -15998,14 +17156,14 @@ uint32_t AllocateTableWriteIdsResponse::read(::apache::thrift::protocol::TProtoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnToWriteIds.clear(); - uint32_t _size682; - ::apache::thrift::protocol::TType _etype685; - xfer += iprot->readListBegin(_etype685, _size682); - this->txnToWriteIds.resize(_size682); - uint32_t _i686; - for (_i686 = 0; _i686 < _size682; ++_i686) + uint32_t _size700; + ::apache::thrift::protocol::TType _etype703; + xfer += iprot->readListBegin(_etype703, _size700); + this->txnToWriteIds.resize(_size700); + uint32_t _i704; + for (_i704 = 0; _i704 < _size700; ++_i704) { - xfer += this->txnToWriteIds[_i686].read(iprot); + xfer += this->txnToWriteIds[_i704].read(iprot); } xfer += iprot->readListEnd(); } @@ -16036,10 +17194,10 @@ uint32_t AllocateTableWriteIdsResponse::write(::apache::thrift::protocol::TProto xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); - std::vector ::const_iterator _iter687; - for (_iter687 = this->txnToWriteIds.begin(); _iter687 != this->txnToWriteIds.end(); ++_iter687) + std::vector ::const_iterator _iter705; + for (_iter705 = this->txnToWriteIds.begin(); _iter705 != this->txnToWriteIds.end(); ++_iter705) { - xfer += (*_iter687).write(oprot); + xfer += (*_iter705).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16055,11 +17213,11 @@ void swap(AllocateTableWriteIdsResponse &a, AllocateTableWriteIdsResponse &b) { swap(a.txnToWriteIds, b.txnToWriteIds); } -AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other688) { - txnToWriteIds = other688.txnToWriteIds; +AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other706) { + txnToWriteIds = other706.txnToWriteIds; } -AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other689) { - txnToWriteIds = other689.txnToWriteIds; +AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other707) { + txnToWriteIds = other707.txnToWriteIds; return *this; } void AllocateTableWriteIdsResponse::printTo(std::ostream& out) const { @@ -16137,9 +17295,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast690; - xfer += iprot->readI32(ecast690); - this->type = (LockType::type)ecast690; + int32_t ecast708; + xfer += iprot->readI32(ecast708); + this->type = (LockType::type)ecast708; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16147,9 +17305,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast691; - xfer += iprot->readI32(ecast691); - this->level = (LockLevel::type)ecast691; + int32_t ecast709; + xfer += iprot->readI32(ecast709); + this->level = (LockLevel::type)ecast709; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -16181,9 +17339,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast692; - xfer += iprot->readI32(ecast692); - this->operationType = (DataOperationType::type)ecast692; + int32_t ecast710; + xfer += iprot->readI32(ecast710); + this->operationType = (DataOperationType::type)ecast710; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -16283,27 +17441,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other693) { - type = other693.type; - level = other693.level; - dbname = other693.dbname; - tablename = other693.tablename; - partitionname = other693.partitionname; - operationType = other693.operationType; - isTransactional = other693.isTransactional; - isDynamicPartitionWrite = other693.isDynamicPartitionWrite; - __isset = other693.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other694) { - type = other694.type; - level = other694.level; - dbname = other694.dbname; - tablename = other694.tablename; - partitionname = other694.partitionname; - operationType = other694.operationType; - isTransactional = other694.isTransactional; - isDynamicPartitionWrite = other694.isDynamicPartitionWrite; - __isset = other694.__isset; +LockComponent::LockComponent(const LockComponent& other711) { + type = other711.type; + level = other711.level; + dbname = other711.dbname; + tablename = other711.tablename; + partitionname = other711.partitionname; + operationType = other711.operationType; + isTransactional = other711.isTransactional; + isDynamicPartitionWrite = other711.isDynamicPartitionWrite; + __isset = other711.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other712) { + type = other712.type; + level = other712.level; + dbname = other712.dbname; + tablename = other712.tablename; + partitionname = other712.partitionname; + operationType = other712.operationType; + isTransactional = other712.isTransactional; + isDynamicPartitionWrite = other712.isDynamicPartitionWrite; + __isset = other712.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -16375,14 +17533,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size695; - ::apache::thrift::protocol::TType _etype698; - xfer += iprot->readListBegin(_etype698, _size695); - this->component.resize(_size695); - uint32_t _i699; - for (_i699 = 0; _i699 < _size695; ++_i699) + uint32_t _size713; + ::apache::thrift::protocol::TType _etype716; + xfer += iprot->readListBegin(_etype716, _size713); + this->component.resize(_size713); + uint32_t _i717; + for (_i717 = 0; _i717 < _size713; ++_i717) { - xfer += this->component[_i699].read(iprot); + xfer += this->component[_i717].read(iprot); } xfer += iprot->readListEnd(); } @@ -16449,10 +17607,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter700; - for (_iter700 = this->component.begin(); _iter700 != this->component.end(); ++_iter700) + std::vector ::const_iterator _iter718; + for (_iter718 = this->component.begin(); _iter718 != this->component.end(); ++_iter718) { - xfer += (*_iter700).write(oprot); + xfer += (*_iter718).write(oprot); } xfer += oprot->writeListEnd(); } @@ 
-16491,21 +17649,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other701) { - component = other701.component; - txnid = other701.txnid; - user = other701.user; - hostname = other701.hostname; - agentInfo = other701.agentInfo; - __isset = other701.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other702) { - component = other702.component; - txnid = other702.txnid; - user = other702.user; - hostname = other702.hostname; - agentInfo = other702.agentInfo; - __isset = other702.__isset; +LockRequest::LockRequest(const LockRequest& other719) { + component = other719.component; + txnid = other719.txnid; + user = other719.user; + hostname = other719.hostname; + agentInfo = other719.agentInfo; + __isset = other719.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other720) { + component = other720.component; + txnid = other720.txnid; + user = other720.user; + hostname = other720.hostname; + agentInfo = other720.agentInfo; + __isset = other720.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -16565,9 +17723,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast703; - xfer += iprot->readI32(ecast703); - this->state = (LockState::type)ecast703; + int32_t ecast721; + xfer += iprot->readI32(ecast721); + this->state = (LockState::type)ecast721; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -16613,13 +17771,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other704) { - lockid = other704.lockid; - state = other704.state; +LockResponse::LockResponse(const LockResponse& other722) { + lockid = other722.lockid; + state = other722.state; } -LockResponse& LockResponse::operator=(const LockResponse& other705) { - lockid = other705.lockid; - state = other705.state; +LockResponse& LockResponse::operator=(const LockResponse& other723) { + lockid = other723.lockid; + state = other723.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -16741,17 +17899,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other706) { - lockid = other706.lockid; - txnid = other706.txnid; - elapsed_ms = other706.elapsed_ms; - __isset = other706.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other724) { + lockid = other724.lockid; + txnid = other724.txnid; + elapsed_ms = other724.elapsed_ms; + __isset = other724.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other707) { - lockid = other707.lockid; - txnid = other707.txnid; - elapsed_ms = other707.elapsed_ms; - __isset = other707.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other725) { + lockid = other725.lockid; + txnid = other725.txnid; + elapsed_ms = other725.elapsed_ms; + __isset = other725.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -16835,11 +17993,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other708) { - lockid = other708.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other726) { + lockid = other726.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other709) { - lockid 
= other709.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other727) { + lockid = other727.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -16978,19 +18136,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other710) { - dbname = other710.dbname; - tablename = other710.tablename; - partname = other710.partname; - isExtended = other710.isExtended; - __isset = other710.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other728) { + dbname = other728.dbname; + tablename = other728.tablename; + partname = other728.partname; + isExtended = other728.isExtended; + __isset = other728.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other711) { - dbname = other711.dbname; - tablename = other711.tablename; - partname = other711.partname; - isExtended = other711.isExtended; - __isset = other711.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other729) { + dbname = other729.dbname; + tablename = other729.tablename; + partname = other729.partname; + isExtended = other729.isExtended; + __isset = other729.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -17143,9 +18301,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast712; - xfer += iprot->readI32(ecast712); - this->state = (LockState::type)ecast712; + int32_t ecast730; + xfer += iprot->readI32(ecast730); + this->state = (LockState::type)ecast730; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -17153,9 +18311,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast713; - xfer += iprot->readI32(ecast713); - this->type = (LockType::type)ecast713; + int32_t ecast731; + xfer += iprot->readI32(ecast731); + this->type = (LockType::type)ecast731; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -17371,43 +18529,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other714) { - lockid = other714.lockid; - dbname = other714.dbname; - tablename = other714.tablename; - partname = other714.partname; - state = other714.state; - type = other714.type; - txnid = other714.txnid; - lastheartbeat = other714.lastheartbeat; - acquiredat = other714.acquiredat; - user = other714.user; - hostname = other714.hostname; - heartbeatCount = other714.heartbeatCount; - agentInfo = other714.agentInfo; - blockedByExtId = other714.blockedByExtId; - blockedByIntId = other714.blockedByIntId; - lockIdInternal = other714.lockIdInternal; - __isset = other714.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other715) { - lockid = other715.lockid; - dbname = other715.dbname; - tablename = other715.tablename; - partname = other715.partname; - state = other715.state; - type = other715.type; - txnid = other715.txnid; - lastheartbeat = other715.lastheartbeat; - acquiredat = other715.acquiredat; - user = other715.user; - hostname = other715.hostname; - heartbeatCount = other715.heartbeatCount; - agentInfo = other715.agentInfo; - blockedByExtId = other715.blockedByExtId; - blockedByIntId = 
other715.blockedByIntId; - lockIdInternal = other715.lockIdInternal; - __isset = other715.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other732) { + lockid = other732.lockid; + dbname = other732.dbname; + tablename = other732.tablename; + partname = other732.partname; + state = other732.state; + type = other732.type; + txnid = other732.txnid; + lastheartbeat = other732.lastheartbeat; + acquiredat = other732.acquiredat; + user = other732.user; + hostname = other732.hostname; + heartbeatCount = other732.heartbeatCount; + agentInfo = other732.agentInfo; + blockedByExtId = other732.blockedByExtId; + blockedByIntId = other732.blockedByIntId; + lockIdInternal = other732.lockIdInternal; + __isset = other732.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other733) { + lockid = other733.lockid; + dbname = other733.dbname; + tablename = other733.tablename; + partname = other733.partname; + state = other733.state; + type = other733.type; + txnid = other733.txnid; + lastheartbeat = other733.lastheartbeat; + acquiredat = other733.acquiredat; + user = other733.user; + hostname = other733.hostname; + heartbeatCount = other733.heartbeatCount; + agentInfo = other733.agentInfo; + blockedByExtId = other733.blockedByExtId; + blockedByIntId = other733.blockedByIntId; + lockIdInternal = other733.lockIdInternal; + __isset = other733.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -17466,14 +18624,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size716; - ::apache::thrift::protocol::TType _etype719; - xfer += iprot->readListBegin(_etype719, _size716); - this->locks.resize(_size716); - uint32_t _i720; - for (_i720 = 0; _i720 < _size716; ++_i720) + uint32_t _size734; + ::apache::thrift::protocol::TType _etype737; + xfer += iprot->readListBegin(_etype737, _size734); + this->locks.resize(_size734); + uint32_t _i738; + for (_i738 = 0; _i738 < _size734; ++_i738) { - xfer += this->locks[_i720].read(iprot); + xfer += this->locks[_i738].read(iprot); } xfer += iprot->readListEnd(); } @@ -17502,10 +18660,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter721; - for (_iter721 = this->locks.begin(); _iter721 != this->locks.end(); ++_iter721) + std::vector ::const_iterator _iter739; + for (_iter739 = this->locks.begin(); _iter739 != this->locks.end(); ++_iter739) { - xfer += (*_iter721).write(oprot); + xfer += (*_iter739).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17522,13 +18680,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other722) { - locks = other722.locks; - __isset = other722.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other740) { + locks = other740.locks; + __isset = other740.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other723) { - locks = other723.locks; - __isset = other723.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other741) { + locks = other741.locks; + __isset = 
other741.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -17629,15 +18787,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other724) { - lockid = other724.lockid; - txnid = other724.txnid; - __isset = other724.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other742) { + lockid = other742.lockid; + txnid = other742.txnid; + __isset = other742.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other725) { - lockid = other725.lockid; - txnid = other725.txnid; - __isset = other725.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other743) { + lockid = other743.lockid; + txnid = other743.txnid; + __isset = other743.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -17740,13 +18898,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other726) { - min = other726.min; - max = other726.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other744) { + min = other744.min; + max = other744.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other727) { - min = other727.min; - max = other727.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other745) { + min = other745.min; + max = other745.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -17797,15 +18955,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size728; - ::apache::thrift::protocol::TType _etype731; - xfer += iprot->readSetBegin(_etype731, _size728); - uint32_t _i732; - for (_i732 = 0; _i732 < _size728; ++_i732) + uint32_t _size746; + ::apache::thrift::protocol::TType _etype749; + xfer += iprot->readSetBegin(_etype749, _size746); + uint32_t _i750; + for (_i750 = 0; _i750 < _size746; ++_i750) { - int64_t _elem733; - xfer += iprot->readI64(_elem733); - this->aborted.insert(_elem733); + int64_t _elem751; + xfer += iprot->readI64(_elem751); + this->aborted.insert(_elem751); } xfer += iprot->readSetEnd(); } @@ -17818,15 +18976,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size734; - ::apache::thrift::protocol::TType _etype737; - xfer += iprot->readSetBegin(_etype737, _size734); - uint32_t _i738; - for (_i738 = 0; _i738 < _size734; ++_i738) + uint32_t _size752; + ::apache::thrift::protocol::TType _etype755; + xfer += iprot->readSetBegin(_etype755, _size752); + uint32_t _i756; + for (_i756 = 0; _i756 < _size752; ++_i756) { - int64_t _elem739; - xfer += iprot->readI64(_elem739); - this->nosuch.insert(_elem739); + int64_t _elem757; + xfer += iprot->readI64(_elem757); + this->nosuch.insert(_elem757); } xfer += iprot->readSetEnd(); } @@ -17859,10 +19017,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator 
_iter740; - for (_iter740 = this->aborted.begin(); _iter740 != this->aborted.end(); ++_iter740) + std::set ::const_iterator _iter758; + for (_iter758 = this->aborted.begin(); _iter758 != this->aborted.end(); ++_iter758) { - xfer += oprot->writeI64((*_iter740)); + xfer += oprot->writeI64((*_iter758)); } xfer += oprot->writeSetEnd(); } @@ -17871,10 +19029,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter741; - for (_iter741 = this->nosuch.begin(); _iter741 != this->nosuch.end(); ++_iter741) + std::set ::const_iterator _iter759; + for (_iter759 = this->nosuch.begin(); _iter759 != this->nosuch.end(); ++_iter759) { - xfer += oprot->writeI64((*_iter741)); + xfer += oprot->writeI64((*_iter759)); } xfer += oprot->writeSetEnd(); } @@ -17891,13 +19049,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other742) { - aborted = other742.aborted; - nosuch = other742.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other760) { + aborted = other760.aborted; + nosuch = other760.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other743) { - aborted = other743.aborted; - nosuch = other743.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other761) { + aborted = other761.aborted; + nosuch = other761.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -17990,9 +19148,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast744; - xfer += iprot->readI32(ecast744); - this->type = (CompactionType::type)ecast744; + int32_t ecast762; + xfer += iprot->readI32(ecast762); + this->type = (CompactionType::type)ecast762; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -18010,17 +19168,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size745; - ::apache::thrift::protocol::TType _ktype746; - ::apache::thrift::protocol::TType _vtype747; - xfer += iprot->readMapBegin(_ktype746, _vtype747, _size745); - uint32_t _i749; - for (_i749 = 0; _i749 < _size745; ++_i749) + uint32_t _size763; + ::apache::thrift::protocol::TType _ktype764; + ::apache::thrift::protocol::TType _vtype765; + xfer += iprot->readMapBegin(_ktype764, _vtype765, _size763); + uint32_t _i767; + for (_i767 = 0; _i767 < _size763; ++_i767) { - std::string _key750; - xfer += iprot->readString(_key750); - std::string& _val751 = this->properties[_key750]; - xfer += iprot->readString(_val751); + std::string _key768; + xfer += iprot->readString(_key768); + std::string& _val769 = this->properties[_key768]; + xfer += iprot->readString(_val769); } xfer += iprot->readMapEnd(); } @@ -18078,11 +19236,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter752; - for (_iter752 = this->properties.begin(); _iter752 != this->properties.end(); ++_iter752) + std::map ::const_iterator _iter770; + for (_iter770 = this->properties.begin(); _iter770 != this->properties.end(); ++_iter770) { - xfer += oprot->writeString(_iter752->first); - xfer += oprot->writeString(_iter752->second); + xfer += oprot->writeString(_iter770->first); + xfer += oprot->writeString(_iter770->second); } xfer += oprot->writeMapEnd(); } @@ -18104,23 +19262,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other753) { - dbname = other753.dbname; - tablename = other753.tablename; - partitionname = other753.partitionname; - type = other753.type; - runas = other753.runas; - properties = other753.properties; - __isset = other753.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other754) { - dbname = other754.dbname; - tablename = other754.tablename; - partitionname = other754.partitionname; - type = other754.type; - runas = other754.runas; - properties = other754.properties; - __isset = other754.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other771) { + dbname = other771.dbname; + tablename = other771.tablename; + partitionname = other771.partitionname; + type = other771.type; + runas = other771.runas; + properties = other771.properties; + __isset = other771.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other772) { + dbname = other772.dbname; + tablename = other772.tablename; + partitionname = other772.partitionname; + type = other772.type; + runas = other772.runas; + properties = other772.properties; + __isset = other772.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -18247,15 +19405,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other755) { - id = other755.id; - state = other755.state; - accepted = other755.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other773) { + id = other773.id; + state = other773.state; + accepted = other773.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other756) { - id = other756.id; - state = other756.state; - accepted = other756.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other774) { + id = other774.id; + state = other774.state; + accepted = other774.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -18316,11 +19474,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other757) { - (void) other757; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other775) { + (void) other775; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other758) { - (void) other758; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other776) { + (void) other776; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -18446,9 +19604,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - 
int32_t ecast759; - xfer += iprot->readI32(ecast759); - this->type = (CompactionType::type)ecast759; + int32_t ecast777; + xfer += iprot->readI32(ecast777); + this->type = (CompactionType::type)ecast777; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -18635,37 +19793,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other760) { - dbname = other760.dbname; - tablename = other760.tablename; - partitionname = other760.partitionname; - type = other760.type; - state = other760.state; - workerid = other760.workerid; - start = other760.start; - runAs = other760.runAs; - hightestTxnId = other760.hightestTxnId; - metaInfo = other760.metaInfo; - endTime = other760.endTime; - hadoopJobId = other760.hadoopJobId; - id = other760.id; - __isset = other760.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other761) { - dbname = other761.dbname; - tablename = other761.tablename; - partitionname = other761.partitionname; - type = other761.type; - state = other761.state; - workerid = other761.workerid; - start = other761.start; - runAs = other761.runAs; - hightestTxnId = other761.hightestTxnId; - metaInfo = other761.metaInfo; - endTime = other761.endTime; - hadoopJobId = other761.hadoopJobId; - id = other761.id; - __isset = other761.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other778) { + dbname = other778.dbname; + tablename = other778.tablename; + partitionname = other778.partitionname; + type = other778.type; + state = other778.state; + workerid = other778.workerid; + start = other778.start; + runAs = other778.runAs; + hightestTxnId = other778.hightestTxnId; + metaInfo = other778.metaInfo; + endTime = other778.endTime; + hadoopJobId = other778.hadoopJobId; + id = other778.id; + __isset = other778.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other779) { + dbname = other779.dbname; + tablename = other779.tablename; + partitionname = other779.partitionname; + type = other779.type; + state = other779.state; + workerid = other779.workerid; + start = other779.start; + runAs = other779.runAs; + hightestTxnId = other779.hightestTxnId; + metaInfo = other779.metaInfo; + endTime = other779.endTime; + hadoopJobId = other779.hadoopJobId; + id = other779.id; + __isset = other779.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -18722,14 +19880,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size762; - ::apache::thrift::protocol::TType _etype765; - xfer += iprot->readListBegin(_etype765, _size762); - this->compacts.resize(_size762); - uint32_t _i766; - for (_i766 = 0; _i766 < _size762; ++_i766) + uint32_t _size780; + ::apache::thrift::protocol::TType _etype783; + xfer += iprot->readListBegin(_etype783, _size780); + this->compacts.resize(_size780); + uint32_t _i784; + for (_i784 = 0; _i784 < _size780; ++_i784) { - xfer += this->compacts[_i766].read(iprot); + xfer += this->compacts[_i784].read(iprot); } xfer += iprot->readListEnd(); } @@ -18760,10 +19918,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", 
::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter767; - for (_iter767 = this->compacts.begin(); _iter767 != this->compacts.end(); ++_iter767) + std::vector ::const_iterator _iter785; + for (_iter785 = this->compacts.begin(); _iter785 != this->compacts.end(); ++_iter785) { - xfer += (*_iter767).write(oprot); + xfer += (*_iter785).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18779,11 +19937,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other768) { - compacts = other768.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other786) { + compacts = other786.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other769) { - compacts = other769.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other787) { + compacts = other787.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -18885,14 +20043,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size770; - ::apache::thrift::protocol::TType _etype773; - xfer += iprot->readListBegin(_etype773, _size770); - this->partitionnames.resize(_size770); - uint32_t _i774; - for (_i774 = 0; _i774 < _size770; ++_i774) + uint32_t _size788; + ::apache::thrift::protocol::TType _etype791; + xfer += iprot->readListBegin(_etype791, _size788); + this->partitionnames.resize(_size788); + uint32_t _i792; + for (_i792 = 0; _i792 < _size788; ++_i792) { - xfer += iprot->readString(this->partitionnames[_i774]); + xfer += iprot->readString(this->partitionnames[_i792]); } xfer += iprot->readListEnd(); } @@ -18903,9 +20061,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast775; - xfer += iprot->readI32(ecast775); - this->operationType = (DataOperationType::type)ecast775; + int32_t ecast793; + xfer += iprot->readI32(ecast793); + this->operationType = (DataOperationType::type)ecast793; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -18957,10 +20115,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter776; - for (_iter776 = this->partitionnames.begin(); _iter776 != this->partitionnames.end(); ++_iter776) + std::vector ::const_iterator _iter794; + for (_iter794 = this->partitionnames.begin(); _iter794 != this->partitionnames.end(); ++_iter794) { - xfer += oprot->writeString((*_iter776)); + xfer += oprot->writeString((*_iter794)); } xfer += oprot->writeListEnd(); } @@ -18987,23 +20145,23 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other777) { - txnid = other777.txnid; - writeid = other777.writeid; - dbname = other777.dbname; - tablename = other777.tablename; - partitionnames = other777.partitionnames; - operationType = 
other777.operationType;
-  __isset = other777.__isset;
-}
-AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other778) {
-  txnid = other778.txnid;
-  writeid = other778.writeid;
-  dbname = other778.dbname;
-  tablename = other778.tablename;
-  partitionnames = other778.partitionnames;
-  operationType = other778.operationType;
-  __isset = other778.__isset;
+AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other795) {
+  txnid = other795.txnid;
+  writeid = other795.writeid;
+  dbname = other795.dbname;
+  tablename = other795.tablename;
+  partitionnames = other795.partitionnames;
+  operationType = other795.operationType;
+  __isset = other795.__isset;
+}
+AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other796) {
+  txnid = other796.txnid;
+  writeid = other796.writeid;
+  dbname = other796.dbname;
+  tablename = other796.tablename;
+  partitionnames = other796.partitionnames;
+  operationType = other796.operationType;
+  __isset = other796.__isset;
   return *this;
 }
 void AddDynamicPartitions::printTo(std::ostream& out) const {
@@ -19186,23 +20344,23 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) {
   swap(a.__isset, b.__isset);
 }
 
-BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other779) {
-  isnull = other779.isnull;
-  time = other779.time;
-  txnid = other779.txnid;
-  dbname = other779.dbname;
-  tablename = other779.tablename;
-  partitionname = other779.partitionname;
-  __isset = other779.__isset;
-}
-BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other780) {
-  isnull = other780.isnull;
-  time = other780.time;
-  txnid = other780.txnid;
-  dbname = other780.dbname;
-  tablename = other780.tablename;
-  partitionname = other780.partitionname;
-  __isset = other780.__isset;
+BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other797) {
+  isnull = other797.isnull;
+  time = other797.time;
+  txnid = other797.txnid;
+  dbname = other797.dbname;
+  tablename = other797.tablename;
+  partitionname = other797.partitionname;
+  __isset = other797.__isset;
+}
+BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other798) {
+  isnull = other798.isnull;
+  time = other798.time;
+  txnid = other798.txnid;
+  dbname = other798.dbname;
+  tablename = other798.tablename;
+  partitionname = other798.partitionname;
+  __isset = other798.__isset;
   return *this;
 }
 void BasicTxnInfo::printTo(std::ostream& out) const {
@@ -19222,6 +20380,10 @@ CreationMetadata::~CreationMetadata() throw() {
 }
 
+void CreationMetadata::__set_catName(const std::string& val) {
+  this->catName = val;
+}
+
 void CreationMetadata::__set_dbName(const std::string& val) {
   this->dbName = val;
 }
@@ -19251,6 +20413,7 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   using ::apache::thrift::protocol::TProtocolException;
 
+  bool isset_catName = false;
   bool isset_dbName = false;
   bool isset_tblName = false;
   bool isset_tablesUsed = false;
@@ -19265,13 +20428,21 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
     {
       case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          isset_catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
           xfer += iprot->readString(this->dbName);
           isset_dbName = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 2:
+      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
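
As the hunk above shows, CreationMetadata::read() dispatches on the numeric field id from the wire and tracks each required field in a local isset_* boolean; the validation that consumes those flags appears just below, after readStructEnd(). A stand-alone mimic of that idiom, with StandInProtocolException as a placeholder for apache::thrift::protocol::TProtocolException:

    #include <stdexcept>

    // Placeholder for apache::thrift::protocol::TProtocolException.
    struct StandInProtocolException : std::runtime_error {
      using std::runtime_error::runtime_error;
    };

    // Mirrors the checks the generator emits after readStructEnd():
    // any missing required field invalidates the whole struct.
    void validateRequired(bool isset_catName, bool isset_dbName, bool isset_tblName) {
      if (!isset_catName) throw StandInProtocolException("INVALID_DATA: catName");
      if (!isset_dbName)  throw StandInProtocolException("INVALID_DATA: dbName");
      if (!isset_tblName) throw StandInProtocolException("INVALID_DATA: tblName");
    }

Optional fields, by contrast, set bits in the struct-level __isset member and are never validated this way.
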
@@ -19279,19 +20450,19 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 3:
+      case 4:
         if (ftype == ::apache::thrift::protocol::T_SET) {
           {
             this->tablesUsed.clear();
-            uint32_t _size781;
-            ::apache::thrift::protocol::TType _etype784;
-            xfer += iprot->readSetBegin(_etype784, _size781);
-            uint32_t _i785;
-            for (_i785 = 0; _i785 < _size781; ++_i785)
+            uint32_t _size799;
+            ::apache::thrift::protocol::TType _etype802;
+            xfer += iprot->readSetBegin(_etype802, _size799);
+            uint32_t _i803;
+            for (_i803 = 0; _i803 < _size799; ++_i803)
             {
-              std::string _elem786;
-              xfer += iprot->readString(_elem786);
-              this->tablesUsed.insert(_elem786);
+              std::string _elem804;
+              xfer += iprot->readString(_elem804);
+              this->tablesUsed.insert(_elem804);
             }
             xfer += iprot->readSetEnd();
           }
@@ -19300,7 +20471,7 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 4:
+      case 5:
         if (ftype == ::apache::thrift::protocol::T_STRING) {
           xfer += iprot->readString(this->validTxnList);
           this->__isset.validTxnList = true;
@@ -19317,6 +20488,8 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   xfer += iprot->readStructEnd();
 
+  if (!isset_catName)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
   if (!isset_dbName)
     throw TProtocolException(TProtocolException::INVALID_DATA);
   if (!isset_tblName)
@@ -19331,28 +20504,32 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
   xfer += oprot->writeStructBegin("CreationMetadata");
 
-  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->dbName);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->tblName);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 3);
+  xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 4);
   {
     xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size()));
-    std::set<std::string> ::const_iterator _iter787;
-    for (_iter787 = this->tablesUsed.begin(); _iter787 != this->tablesUsed.end(); ++_iter787)
+    std::set<std::string> ::const_iterator _iter805;
+    for (_iter805 = this->tablesUsed.begin(); _iter805 != this->tablesUsed.end(); ++_iter805)
     {
-      xfer += oprot->writeString((*_iter787));
+      xfer += oprot->writeString((*_iter805));
    }
    xfer += oprot->writeSetEnd();
  }
  xfer += oprot->writeFieldEnd();
 
   if (this->__isset.validTxnList) {
-    xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 4);
+    xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 5);
     xfer += oprot->writeString(this->validTxnList);
     xfer += oprot->writeFieldEnd();
   }
@@ -19363,6 +20540,7 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c
 
 void swap(CreationMetadata &a, CreationMetadata &b) {
   using ::std::swap;
+  swap(a.catName, b.catName);
   swap(a.dbName, b.dbName);
   swap(a.tblName, b.tblName);
   swap(a.tablesUsed, b.tablesUsed);
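
The write() hunk above makes the renumbering explicit: catName takes field id 1 and every existing CreationMetadata field shifts up by one. Thrift identifies fields on the wire by these numeric ids, not by name, so readers and writers of this struct must be regenerated together. The ids implied by the writeFieldBegin() calls, with the old ids for comparison (this enum is illustrative only; it does not exist in the generated code):

    // Field ids implied by the writeFieldBegin() calls above.
    enum CreationMetadataFieldId {
      CM_CAT_NAME       = 1,  // new in this change
      CM_DB_NAME        = 2,  // previously 1
      CM_TBL_NAME       = 3,  // previously 2
      CM_TABLES_USED    = 4,  // previously 3
      CM_VALID_TXN_LIST = 5   // previously 4
    };
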
@@ -19370,25 +20548,28 @@ void swap(CreationMetadata &a, CreationMetadata &b) {
   swap(a.__isset, b.__isset);
 }
 
-CreationMetadata::CreationMetadata(const CreationMetadata& other788) {
-  dbName = other788.dbName;
-  tblName = other788.tblName;
-  tablesUsed = other788.tablesUsed;
-  validTxnList = other788.validTxnList;
-  __isset = other788.__isset;
-}
-CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other789) {
-  dbName = other789.dbName;
-  tblName = other789.tblName;
-  tablesUsed = other789.tablesUsed;
-  validTxnList = other789.validTxnList;
-  __isset = other789.__isset;
+CreationMetadata::CreationMetadata(const CreationMetadata& other806) {
+  catName = other806.catName;
+  dbName = other806.dbName;
+  tblName = other806.tblName;
+  tablesUsed = other806.tablesUsed;
+  validTxnList = other806.validTxnList;
+  __isset = other806.__isset;
+}
+CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other807) {
+  catName = other807.catName;
+  dbName = other807.dbName;
+  tblName = other807.tblName;
+  tablesUsed = other807.tablesUsed;
+  validTxnList = other807.validTxnList;
+  __isset = other807.__isset;
   return *this;
 }
 void CreationMetadata::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
   out << "CreationMetadata(";
-  out << "dbName=" << to_string(dbName);
+  out << "catName=" << to_string(catName);
+  out << ", " << "dbName=" << to_string(dbName);
   out << ", " << "tblName=" << to_string(tblName);
   out << ", " << "tablesUsed=" << to_string(tablesUsed);
   out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << ""));
@@ -19487,15 +20668,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other790) {
-  lastEvent = other790.lastEvent;
-  maxEvents = other790.maxEvents;
-  __isset = other790.__isset;
+NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other808) {
+  lastEvent = other808.lastEvent;
+  maxEvents = other808.maxEvents;
+  __isset = other808.__isset;
 }
-NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other791) {
-  lastEvent = other791.lastEvent;
-  maxEvents = other791.maxEvents;
-  __isset = other791.__isset;
+NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other809) {
+  lastEvent = other809.lastEvent;
+  maxEvents = other809.maxEvents;
+  __isset = other809.__isset;
   return *this;
 }
 void NotificationEventRequest::printTo(std::ostream& out) const {
@@ -19542,6 +20723,11 @@ void NotificationEvent::__set_messageFormat(const std::string& val) {
 __isset.messageFormat = true;
 }
 
+void NotificationEvent::__set_catName(const std::string& val) {
+  this->catName = val;
+__isset.catName = true;
+}
+
 uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) {
@@ -19623,6 +20809,14 @@ uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
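
NotificationEvent::__set_catName() above follows the generated optional-field pattern: store the value, then flip the matching bit in the __isset member, which later gates both write() and printTo(). A self-contained mimic of that pattern (NotificationEventMimic is illustrative, not the real generated class):

    #include <string>

    // Illustrative mimic of the generated optional-field pattern.
    struct NotificationEventMimicIsset {
      bool catName = false;
    };

    struct NotificationEventMimic {
      std::string catName;
      NotificationEventMimicIsset __isset;

      void __set_catName(const std::string& val) {
        catName = val;
        __isset.catName = true;  // presence bit gates write() and printTo()
      }
    };

Assigning the member directly, without the setter, would leave the bit clear and the field would silently never be serialized.
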
@@ -19679,6 +20873,11 @@ uint32_t NotificationEvent::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeString(this->messageFormat);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.catName) {
+    xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 8);
+    xfer += oprot->writeString(this->catName);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -19693,28 +20892,31 @@ void swap(NotificationEvent &a, NotificationEvent &b) {
   swap(a.tableName, b.tableName);
   swap(a.message, b.message);
   swap(a.messageFormat, b.messageFormat);
+  swap(a.catName, b.catName);
   swap(a.__isset, b.__isset);
 }
 
-NotificationEvent::NotificationEvent(const NotificationEvent& other792) {
-  eventId = other792.eventId;
-  eventTime = other792.eventTime;
-  eventType = other792.eventType;
-  dbName = other792.dbName;
-  tableName = other792.tableName;
-  message = other792.message;
-  messageFormat = other792.messageFormat;
-  __isset = other792.__isset;
-}
-NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other793) {
-  eventId = other793.eventId;
-  eventTime = other793.eventTime;
-  eventType = other793.eventType;
-  dbName = other793.dbName;
-  tableName = other793.tableName;
-  message = other793.message;
-  messageFormat = other793.messageFormat;
-  __isset = other793.__isset;
+NotificationEvent::NotificationEvent(const NotificationEvent& other810) {
+  eventId = other810.eventId;
+  eventTime = other810.eventTime;
+  eventType = other810.eventType;
+  dbName = other810.dbName;
+  tableName = other810.tableName;
+  message = other810.message;
+  messageFormat = other810.messageFormat;
+  catName = other810.catName;
+  __isset = other810.__isset;
+}
+NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other811) {
+  eventId = other811.eventId;
+  eventTime = other811.eventTime;
+  eventType = other811.eventType;
+  dbName = other811.dbName;
+  tableName = other811.tableName;
+  message = other811.message;
+  messageFormat = other811.messageFormat;
+  catName = other811.catName;
+  __isset = other811.__isset;
   return *this;
 }
 void NotificationEvent::printTo(std::ostream& out) const {
@@ -19727,6 +20929,7 @@ void NotificationEvent::printTo(std::ostream& out) const {
   out << ", " << "tableName="; (__isset.tableName ? (out << to_string(tableName)) : (out << ""));
   out << ", " << "message=" << to_string(message);
   out << ", " << "messageFormat="; (__isset.messageFormat ? (out << to_string(messageFormat)) : (out << ""));
+  out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << ""));
   out << ")";
 }
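
The write() change above emits catName (field 8) only when __isset.catName is set, so unset optional fields cost nothing on the wire and older readers simply never see the field. A sketch of that conditional emit, assuming the Thrift C++ protocol headers (writeCatNameIfSet is an illustrative helper, not generated code):

    #include <thrift/protocol/TProtocol.h>
    #include <string>

    // Optional fields are skipped entirely when unset, so the wire
    // format only grows for writers that actually populate catName.
    uint32_t writeCatNameIfSet(::apache::thrift::protocol::TProtocol* oprot,
                               const std::string& catName, bool isset) {
      uint32_t xfer = 0;
      if (isset) {
        xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 8);
        xfer += oprot->writeString(catName);
        xfer += oprot->writeFieldEnd();
      }
      return xfer;
    }
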
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -19765,14 +20968,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size794; - ::apache::thrift::protocol::TType _etype797; - xfer += iprot->readListBegin(_etype797, _size794); - this->events.resize(_size794); - uint32_t _i798; - for (_i798 = 0; _i798 < _size794; ++_i798) + uint32_t _size812; + ::apache::thrift::protocol::TType _etype815; + xfer += iprot->readListBegin(_etype815, _size812); + this->events.resize(_size812); + uint32_t _i816; + for (_i816 = 0; _i816 < _size812; ++_i816) { - xfer += this->events[_i798].read(iprot); + xfer += this->events[_i816].read(iprot); } xfer += iprot->readListEnd(); } @@ -19803,10 +21006,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter799; - for (_iter799 = this->events.begin(); _iter799 != this->events.end(); ++_iter799) + std::vector ::const_iterator _iter817; + for (_iter817 = this->events.begin(); _iter817 != this->events.end(); ++_iter817) { - xfer += (*_iter799).write(oprot); + xfer += (*_iter817).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19822,11 +21025,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other800) { - events = other800.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other818) { + events = other818.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other801) { - events = other801.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other819) { + events = other819.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -19908,11 +21111,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other802) { - eventId = other802.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other820) { + eventId = other820.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other803) { - eventId = other803.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other821) { + eventId = other821.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -19935,6 +21138,11 @@ void NotificationEventsCountRequest::__set_dbName(const std::string& val) { this->dbName = val; } +void NotificationEventsCountRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t NotificationEventsCountRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -19974,6 +21182,14 @@ uint32_t NotificationEventsCountRequest::read(::apache::thrift::protocol::TProto xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == 
::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -20003,6 +21219,11 @@ uint32_t NotificationEventsCountRequest::write(::apache::thrift::protocol::TProt xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -20012,15 +21233,21 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) using ::std::swap; swap(a.fromEventId, b.fromEventId); swap(a.dbName, b.dbName); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other804) { - fromEventId = other804.fromEventId; - dbName = other804.dbName; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other822) { + fromEventId = other822.fromEventId; + dbName = other822.dbName; + catName = other822.catName; + __isset = other822.__isset; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other805) { - fromEventId = other805.fromEventId; - dbName = other805.dbName; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other823) { + fromEventId = other823.fromEventId; + dbName = other823.dbName; + catName = other823.catName; + __isset = other823.__isset; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -20028,6 +21255,7 @@ void NotificationEventsCountRequest::printTo(std::ostream& out) const { out << "NotificationEventsCountRequest("; out << "fromEventId=" << to_string(fromEventId); out << ", " << "dbName=" << to_string(dbName); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -20103,11 +21331,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } -NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other806) { - eventsCount = other806.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other824) { + eventsCount = other824.eventsCount; } -NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other807) { - eventsCount = other807.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other825) { + eventsCount = other825.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -20170,14 +21398,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size808; - ::apache::thrift::protocol::TType _etype811; - xfer += iprot->readListBegin(_etype811, _size808); - this->filesAdded.resize(_size808); - uint32_t _i812; - for (_i812 = 0; _i812 < _size808; ++_i812) + uint32_t _size826; + ::apache::thrift::protocol::TType _etype829; + xfer += iprot->readListBegin(_etype829, _size826); + this->filesAdded.resize(_size826); + uint32_t _i830; + for (_i830 = 0; _i830 < _size826; ++_i830) { - xfer += iprot->readString(this->filesAdded[_i812]); + xfer += iprot->readString(this->filesAdded[_i830]); } xfer += iprot->readListEnd(); } @@ -20190,14 +21418,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size813; - ::apache::thrift::protocol::TType _etype816; - xfer += iprot->readListBegin(_etype816, _size813); - this->filesAddedChecksum.resize(_size813); - uint32_t _i817; - for (_i817 = 0; _i817 < _size813; ++_i817) + uint32_t _size831; + ::apache::thrift::protocol::TType _etype834; + xfer += iprot->readListBegin(_etype834, _size831); + this->filesAddedChecksum.resize(_size831); + uint32_t _i835; + for (_i835 = 0; _i835 < _size831; ++_i835) { - xfer += iprot->readString(this->filesAddedChecksum[_i817]); + xfer += iprot->readString(this->filesAddedChecksum[_i835]); } xfer += iprot->readListEnd(); } @@ -20233,10 +21461,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter818; - for (_iter818 = this->filesAdded.begin(); _iter818 != this->filesAdded.end(); ++_iter818) + std::vector ::const_iterator _iter836; + for (_iter836 = this->filesAdded.begin(); _iter836 != this->filesAdded.end(); ++_iter836) { - xfer += oprot->writeString((*_iter818)); + xfer += oprot->writeString((*_iter836)); } xfer += oprot->writeListEnd(); } @@ -20246,10 +21474,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector 
::const_iterator _iter819; - for (_iter819 = this->filesAddedChecksum.begin(); _iter819 != this->filesAddedChecksum.end(); ++_iter819) + std::vector ::const_iterator _iter837; + for (_iter837 = this->filesAddedChecksum.begin(); _iter837 != this->filesAddedChecksum.end(); ++_iter837) { - xfer += oprot->writeString((*_iter819)); + xfer += oprot->writeString((*_iter837)); } xfer += oprot->writeListEnd(); } @@ -20268,17 +21496,17 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other820) { - replace = other820.replace; - filesAdded = other820.filesAdded; - filesAddedChecksum = other820.filesAddedChecksum; - __isset = other820.__isset; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other838) { + replace = other838.replace; + filesAdded = other838.filesAdded; + filesAddedChecksum = other838.filesAddedChecksum; + __isset = other838.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other821) { - replace = other821.replace; - filesAdded = other821.filesAdded; - filesAddedChecksum = other821.filesAddedChecksum; - __isset = other821.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other839) { + replace = other839.replace; + filesAdded = other839.filesAdded; + filesAddedChecksum = other839.filesAddedChecksum; + __isset = other839.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -20360,13 +21588,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other822) { - insertData = other822.insertData; - __isset = other822.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other840) { + insertData = other840.insertData; + __isset = other840.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other823) { - insertData = other823.insertData; - __isset = other823.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other841) { + insertData = other841.insertData; + __isset = other841.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -20404,6 +21632,11 @@ void FireEventRequest::__set_partitionVals(const std::vector & val) __isset.partitionVals = true; } +void FireEventRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -20463,14 +21696,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size824; - ::apache::thrift::protocol::TType _etype827; - xfer += iprot->readListBegin(_etype827, _size824); - this->partitionVals.resize(_size824); - uint32_t _i828; - for (_i828 = 0; _i828 < _size824; ++_i828) + uint32_t _size842; + ::apache::thrift::protocol::TType _etype845; + xfer += iprot->readListBegin(_etype845, _size842); + this->partitionVals.resize(_size842); + uint32_t _i846; + for (_i846 = 0; _i846 < _size842; ++_i846) { - xfer += iprot->readString(this->partitionVals[_i828]); + xfer += iprot->readString(this->partitionVals[_i846]); } xfer += 
iprot->readListEnd(); } @@ -20479,6 +21712,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -20522,15 +21763,20 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter829; - for (_iter829 = this->partitionVals.begin(); _iter829 != this->partitionVals.end(); ++_iter829) + std::vector ::const_iterator _iter847; + for (_iter847 = this->partitionVals.begin(); _iter847 != this->partitionVals.end(); ++_iter847) { - xfer += oprot->writeString((*_iter829)); + xfer += oprot->writeString((*_iter847)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -20543,24 +21789,27 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); swap(a.partitionVals, b.partitionVals); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other830) { - successful = other830.successful; - data = other830.data; - dbName = other830.dbName; - tableName = other830.tableName; - partitionVals = other830.partitionVals; - __isset = other830.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other831) { - successful = other831.successful; - data = other831.data; - dbName = other831.dbName; - tableName = other831.tableName; - partitionVals = other831.partitionVals; - __isset = other831.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other848) { + successful = other848.successful; + data = other848.data; + dbName = other848.dbName; + tableName = other848.tableName; + partitionVals = other848.partitionVals; + catName = other848.catName; + __isset = other848.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other849) { + successful = other849.successful; + data = other849.data; + dbName = other849.dbName; + tableName = other849.tableName; + partitionVals = other849.partitionVals; + catName = other849.catName; + __isset = other849.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -20571,6 +21820,7 @@ void FireEventRequest::printTo(std::ostream& out) const { out << ", " << "dbName="; (__isset.dbName ? (out << to_string(dbName)) : (out << "")); out << ", " << "tableName="; (__isset.tableName ? (out << to_string(tableName)) : (out << "")); out << ", " << "partitionVals="; (__isset.partitionVals ? (out << to_string(partitionVals)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -20623,11 +21873,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other832) { - (void) other832; +FireEventResponse::FireEventResponse(const FireEventResponse& other850) { + (void) other850; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other833) { - (void) other833; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other851) { + (void) other851; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -20727,15 +21977,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other834) { - metadata = other834.metadata; - includeBitset = other834.includeBitset; - __isset = other834.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other852) { + metadata = other852.metadata; + includeBitset = other852.includeBitset; + __isset = other852.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other835) { - metadata = other835.metadata; - includeBitset = other835.includeBitset; - __isset = other835.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other853) { + metadata = other853.metadata; + includeBitset = other853.includeBitset; + __isset = other853.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -20786,17 +22036,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size836; - ::apache::thrift::protocol::TType _ktype837; - ::apache::thrift::protocol::TType _vtype838; - xfer += iprot->readMapBegin(_ktype837, _vtype838, _size836); - uint32_t _i840; - for (_i840 = 0; _i840 < _size836; ++_i840) + uint32_t _size854; + ::apache::thrift::protocol::TType _ktype855; + ::apache::thrift::protocol::TType _vtype856; + xfer += iprot->readMapBegin(_ktype855, _vtype856, _size854); + uint32_t _i858; + for (_i858 = 0; _i858 < _size854; ++_i858) { - int64_t _key841; - xfer += iprot->readI64(_key841); - MetadataPpdResult& _val842 = this->metadata[_key841]; - xfer += _val842.read(iprot); + int64_t _key859; + xfer += iprot->readI64(_key859); + MetadataPpdResult& _val860 = this->metadata[_key859]; + xfer += _val860.read(iprot); } xfer += iprot->readMapEnd(); } @@ -20837,11 +22087,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter843; - for (_iter843 = this->metadata.begin(); _iter843 != this->metadata.end(); ++_iter843) + std::map ::const_iterator _iter861; + for (_iter861 = this->metadata.begin(); _iter861 != this->metadata.end(); ++_iter861) { - xfer += oprot->writeI64(_iter843->first); - xfer += _iter843->second.write(oprot); + xfer += oprot->writeI64(_iter861->first); + xfer += _iter861->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -20862,13 +22112,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const 
GetFileMetadataByExprResult& other844) { - metadata = other844.metadata; - isSupported = other844.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other862) { + metadata = other862.metadata; + isSupported = other862.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other845) { - metadata = other845.metadata; - isSupported = other845.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other863) { + metadata = other863.metadata; + isSupported = other863.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -20929,14 +22179,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - this->fileIds.resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size864; + ::apache::thrift::protocol::TType _etype867; + xfer += iprot->readListBegin(_etype867, _size864); + this->fileIds.resize(_size864); + uint32_t _i868; + for (_i868 = 0; _i868 < _size864; ++_i868) { - xfer += iprot->readI64(this->fileIds[_i850]); + xfer += iprot->readI64(this->fileIds[_i868]); } xfer += iprot->readListEnd(); } @@ -20963,9 +22213,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast851; - xfer += iprot->readI32(ecast851); - this->type = (FileMetadataExprType::type)ecast851; + int32_t ecast869; + xfer += iprot->readI32(ecast869); + this->type = (FileMetadataExprType::type)ecast869; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -20995,10 +22245,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter852; - for (_iter852 = this->fileIds.begin(); _iter852 != this->fileIds.end(); ++_iter852) + std::vector ::const_iterator _iter870; + for (_iter870 = this->fileIds.begin(); _iter870 != this->fileIds.end(); ++_iter870) { - xfer += oprot->writeI64((*_iter852)); + xfer += oprot->writeI64((*_iter870)); } xfer += oprot->writeListEnd(); } @@ -21032,19 +22282,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other853) { - fileIds = other853.fileIds; - expr = other853.expr; - doGetFooters = other853.doGetFooters; - type = other853.type; - __isset = other853.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other871) { + fileIds = other871.fileIds; + expr = other871.expr; + doGetFooters = other871.doGetFooters; + type = other871.type; + __isset = other871.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other854) { - fileIds = other854.fileIds; - expr = other854.expr; - doGetFooters = other854.doGetFooters; - type = other854.type; - __isset = other854.__isset; 
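
Aside on the renumbering visible in this and the surrounding hunks: the otherNNN, _sizeNNN, _iterNNN and ecastNNN suffixes come from a single file-scoped counter in the Thrift C++ generator, so the catalog structs and catName fields added earlier in the generated file shift every later temporary by 18 (for example other853 becomes other871); the copy semantics are untouched. For reference, the iterator declarations carry explicit template arguments in the generated source; a minimal sketch of the map write loop above, with the container type assumed from the metadata field of GetFileMetadataByExprResult:

    std::map<int64_t, MetadataPpdResult>::const_iterator _iter861;
    for (_iter861 = this->metadata.begin(); _iter861 != this->metadata.end(); ++_iter861)
    {
      xfer += oprot->writeI64(_iter861->first);   // key: file id
      xfer += _iter861->second.write(oprot);      // value: MetadataPpdResult struct
    }
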
+GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other872) { + fileIds = other872.fileIds; + expr = other872.expr; + doGetFooters = other872.doGetFooters; + type = other872.type; + __isset = other872.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -21097,17 +22347,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size855; - ::apache::thrift::protocol::TType _ktype856; - ::apache::thrift::protocol::TType _vtype857; - xfer += iprot->readMapBegin(_ktype856, _vtype857, _size855); - uint32_t _i859; - for (_i859 = 0; _i859 < _size855; ++_i859) + uint32_t _size873; + ::apache::thrift::protocol::TType _ktype874; + ::apache::thrift::protocol::TType _vtype875; + xfer += iprot->readMapBegin(_ktype874, _vtype875, _size873); + uint32_t _i877; + for (_i877 = 0; _i877 < _size873; ++_i877) { - int64_t _key860; - xfer += iprot->readI64(_key860); - std::string& _val861 = this->metadata[_key860]; - xfer += iprot->readBinary(_val861); + int64_t _key878; + xfer += iprot->readI64(_key878); + std::string& _val879 = this->metadata[_key878]; + xfer += iprot->readBinary(_val879); } xfer += iprot->readMapEnd(); } @@ -21148,11 +22398,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter862; - for (_iter862 = this->metadata.begin(); _iter862 != this->metadata.end(); ++_iter862) + std::map ::const_iterator _iter880; + for (_iter880 = this->metadata.begin(); _iter880 != this->metadata.end(); ++_iter880) { - xfer += oprot->writeI64(_iter862->first); - xfer += oprot->writeBinary(_iter862->second); + xfer += oprot->writeI64(_iter880->first); + xfer += oprot->writeBinary(_iter880->second); } xfer += oprot->writeMapEnd(); } @@ -21173,13 +22423,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other863) { - metadata = other863.metadata; - isSupported = other863.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other881) { + metadata = other881.metadata; + isSupported = other881.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other864) { - metadata = other864.metadata; - isSupported = other864.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other882) { + metadata = other882.metadata; + isSupported = other882.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -21225,14 +22475,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size865; - ::apache::thrift::protocol::TType _etype868; - xfer += iprot->readListBegin(_etype868, _size865); - this->fileIds.resize(_size865); - uint32_t _i869; - for (_i869 = 0; _i869 < _size865; ++_i869) + uint32_t _size883; + ::apache::thrift::protocol::TType _etype886; + xfer += iprot->readListBegin(_etype886, _size883); + this->fileIds.resize(_size883); 
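
The list fields follow the same generated read/write shape throughout. Note the size casts in the write paths read static_cast<uint32_t>(...) in the generated source, matching the uint32_t count parameter of TProtocol::writeListBegin; a minimal sketch of the fileIds write loop under that assumption, with the element type taken from the field's I64 wire type:

    xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1);
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64,
                                  static_cast<uint32_t>(this->fileIds.size()));
    std::vector<int64_t>::const_iterator _iter888;
    for (_iter888 = this->fileIds.begin(); _iter888 != this->fileIds.end(); ++_iter888)
    {
      xfer += oprot->writeI64((*_iter888));
    }
    xfer += oprot->writeListEnd();
    xfer += oprot->writeFieldEnd();
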
+ uint32_t _i887; + for (_i887 = 0; _i887 < _size883; ++_i887) { - xfer += iprot->readI64(this->fileIds[_i869]); + xfer += iprot->readI64(this->fileIds[_i887]); } xfer += iprot->readListEnd(); } @@ -21263,10 +22513,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter870; - for (_iter870 = this->fileIds.begin(); _iter870 != this->fileIds.end(); ++_iter870) + std::vector ::const_iterator _iter888; + for (_iter888 = this->fileIds.begin(); _iter888 != this->fileIds.end(); ++_iter888) { - xfer += oprot->writeI64((*_iter870)); + xfer += oprot->writeI64((*_iter888)); } xfer += oprot->writeListEnd(); } @@ -21282,11 +22532,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other871) { - fileIds = other871.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other889) { + fileIds = other889.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other872) { - fileIds = other872.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other890) { + fileIds = other890.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -21345,11 +22595,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other873) { - (void) other873; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other891) { + (void) other891; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other874) { - (void) other874; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other892) { + (void) other892; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -21403,14 +22653,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size875; - ::apache::thrift::protocol::TType _etype878; - xfer += iprot->readListBegin(_etype878, _size875); - this->fileIds.resize(_size875); - uint32_t _i879; - for (_i879 = 0; _i879 < _size875; ++_i879) + uint32_t _size893; + ::apache::thrift::protocol::TType _etype896; + xfer += iprot->readListBegin(_etype896, _size893); + this->fileIds.resize(_size893); + uint32_t _i897; + for (_i897 = 0; _i897 < _size893; ++_i897) { - xfer += iprot->readI64(this->fileIds[_i879]); + xfer += iprot->readI64(this->fileIds[_i897]); } xfer += iprot->readListEnd(); } @@ -21423,14 +22673,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size880; - ::apache::thrift::protocol::TType _etype883; - xfer += iprot->readListBegin(_etype883, _size880); - this->metadata.resize(_size880); - uint32_t _i884; - for (_i884 = 0; _i884 < _size880; ++_i884) + uint32_t _size898; + ::apache::thrift::protocol::TType _etype901; + xfer += iprot->readListBegin(_etype901, _size898); + this->metadata.resize(_size898); + uint32_t _i902; + for (_i902 = 0; _i902 < 
_size898; ++_i902) { - xfer += iprot->readBinary(this->metadata[_i884]); + xfer += iprot->readBinary(this->metadata[_i902]); } xfer += iprot->readListEnd(); } @@ -21441,9 +22691,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast885; - xfer += iprot->readI32(ecast885); - this->type = (FileMetadataExprType::type)ecast885; + int32_t ecast903; + xfer += iprot->readI32(ecast903); + this->type = (FileMetadataExprType::type)ecast903; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -21473,10 +22723,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter886; - for (_iter886 = this->fileIds.begin(); _iter886 != this->fileIds.end(); ++_iter886) + std::vector ::const_iterator _iter904; + for (_iter904 = this->fileIds.begin(); _iter904 != this->fileIds.end(); ++_iter904) { - xfer += oprot->writeI64((*_iter886)); + xfer += oprot->writeI64((*_iter904)); } xfer += oprot->writeListEnd(); } @@ -21485,10 +22735,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter887; - for (_iter887 = this->metadata.begin(); _iter887 != this->metadata.end(); ++_iter887) + std::vector ::const_iterator _iter905; + for (_iter905 = this->metadata.begin(); _iter905 != this->metadata.end(); ++_iter905) { - xfer += oprot->writeBinary((*_iter887)); + xfer += oprot->writeBinary((*_iter905)); } xfer += oprot->writeListEnd(); } @@ -21512,17 +22762,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other888) { - fileIds = other888.fileIds; - metadata = other888.metadata; - type = other888.type; - __isset = other888.__isset; +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other906) { + fileIds = other906.fileIds; + metadata = other906.metadata; + type = other906.type; + __isset = other906.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other889) { - fileIds = other889.fileIds; - metadata = other889.metadata; - type = other889.type; - __isset = other889.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other907) { + fileIds = other907.fileIds; + metadata = other907.metadata; + type = other907.type; + __isset = other907.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -21583,11 +22833,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other890) { - (void) other890; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other908) { + (void) other908; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other891) { - (void) other891; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& 
other909) { + (void) other909; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -21631,14 +22881,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size892; - ::apache::thrift::protocol::TType _etype895; - xfer += iprot->readListBegin(_etype895, _size892); - this->fileIds.resize(_size892); - uint32_t _i896; - for (_i896 = 0; _i896 < _size892; ++_i896) + uint32_t _size910; + ::apache::thrift::protocol::TType _etype913; + xfer += iprot->readListBegin(_etype913, _size910); + this->fileIds.resize(_size910); + uint32_t _i914; + for (_i914 = 0; _i914 < _size910; ++_i914) { - xfer += iprot->readI64(this->fileIds[_i896]); + xfer += iprot->readI64(this->fileIds[_i914]); } xfer += iprot->readListEnd(); } @@ -21669,10 +22919,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter897; - for (_iter897 = this->fileIds.begin(); _iter897 != this->fileIds.end(); ++_iter897) + std::vector ::const_iterator _iter915; + for (_iter915 = this->fileIds.begin(); _iter915 != this->fileIds.end(); ++_iter915) { - xfer += oprot->writeI64((*_iter897)); + xfer += oprot->writeI64((*_iter915)); } xfer += oprot->writeListEnd(); } @@ -21688,11 +22938,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other898) { - fileIds = other898.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other916) { + fileIds = other916.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other899) { - fileIds = other899.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other917) { + fileIds = other917.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -21774,11 +23024,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other900) { - isSupported = other900.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other918) { + isSupported = other918.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other901) { - isSupported = other901.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other919) { + isSupported = other919.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -21919,19 +23169,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other902) { - dbName = other902.dbName; - tblName = other902.tblName; - partName = other902.partName; - isAllParts = other902.isAllParts; - __isset = other902.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other920) { + dbName = other920.dbName; + tblName = other920.tblName; 
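
CacheFileMetadataRequest keeps only db/table/partition coordinates in this patch; catalog awareness for the request structs arrives below, where GetTableRequest, GetTablesRequest and TableMeta each gain an optional catName. A minimal caller-side sketch of the new optional field, using the generated __set_catName setter shown below; the dbName/tblName setters are assumed from the standard codegen pattern, the table name is a made-up example, and "hive" is assumed to match Warehouse.DEFAULT_CATALOG_NAME:

    GetTableRequest req;
    req.__set_dbName("default");
    req.__set_tblName("web_logs");   // hypothetical table
    req.__set_catName("hive");       // optional field 4; __set_catName flips __isset.catName,
                                     // so write() emits the field and pre-catalog servers,
                                     // whose read() hits the default case, simply skip it
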
+ partName = other920.partName; + isAllParts = other920.isAllParts; + __isset = other920.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other903) { - dbName = other903.dbName; - tblName = other903.tblName; - partName = other903.partName; - isAllParts = other903.isAllParts; - __isset = other903.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other921) { + dbName = other921.dbName; + tblName = other921.tblName; + partName = other921.partName; + isAllParts = other921.isAllParts; + __isset = other921.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -21979,14 +23229,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size904; - ::apache::thrift::protocol::TType _etype907; - xfer += iprot->readListBegin(_etype907, _size904); - this->functions.resize(_size904); - uint32_t _i908; - for (_i908 = 0; _i908 < _size904; ++_i908) + uint32_t _size922; + ::apache::thrift::protocol::TType _etype925; + xfer += iprot->readListBegin(_etype925, _size922); + this->functions.resize(_size922); + uint32_t _i926; + for (_i926 = 0; _i926 < _size922; ++_i926) { - xfer += this->functions[_i908].read(iprot); + xfer += this->functions[_i926].read(iprot); } xfer += iprot->readListEnd(); } @@ -22016,10 +23266,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter909; - for (_iter909 = this->functions.begin(); _iter909 != this->functions.end(); ++_iter909) + std::vector ::const_iterator _iter927; + for (_iter927 = this->functions.begin(); _iter927 != this->functions.end(); ++_iter927) { - xfer += (*_iter909).write(oprot); + xfer += (*_iter927).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22036,13 +23286,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other910) { - functions = other910.functions; - __isset = other910.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other928) { + functions = other928.functions; + __isset = other928.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other911) { - functions = other911.functions; - __isset = other911.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other929) { + functions = other929.functions; + __isset = other929.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -22087,16 +23337,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size912; - ::apache::thrift::protocol::TType _etype915; - xfer += iprot->readListBegin(_etype915, _size912); - this->values.resize(_size912); - uint32_t _i916; - for (_i916 = 0; _i916 < _size912; ++_i916) + uint32_t _size930; + ::apache::thrift::protocol::TType _etype933; + xfer += iprot->readListBegin(_etype933, _size930); + this->values.resize(_size930); + uint32_t 
_i934; + for (_i934 = 0; _i934 < _size930; ++_i934) { - int32_t ecast917; - xfer += iprot->readI32(ecast917); - this->values[_i916] = (ClientCapability::type)ecast917; + int32_t ecast935; + xfer += iprot->readI32(ecast935); + this->values[_i934] = (ClientCapability::type)ecast935; } xfer += iprot->readListEnd(); } @@ -22127,10 +23377,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->values.size())); - std::vector ::const_iterator _iter918; - for (_iter918 = this->values.begin(); _iter918 != this->values.end(); ++_iter918) + std::vector ::const_iterator _iter936; + for (_iter936 = this->values.begin(); _iter936 != this->values.end(); ++_iter936) { - xfer += oprot->writeI32((int32_t)(*_iter918)); + xfer += oprot->writeI32((int32_t)(*_iter936)); } xfer += oprot->writeListEnd(); } @@ -22146,11 +23396,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) { swap(a.values, b.values); } -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other919) { - values = other919.values; +ClientCapabilities::ClientCapabilities(const ClientCapabilities& other937) { + values = other937.values; } -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other920) { - values = other920.values; +ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other938) { + values = other938.values; return *this; } void ClientCapabilities::printTo(std::ostream& out) const { @@ -22178,6 +23428,11 @@ void GetTableRequest::__set_capabilities(const ClientCapabilities& val) { __isset.capabilities = true; } +void GetTableRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -22225,6 +23480,14 @@ uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22259,6 +23522,11 @@ uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += this->capabilities.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -22269,20 +23537,23 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.dbName, b.dbName); swap(a.tblName, b.tblName); swap(a.capabilities, b.capabilities); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -GetTableRequest::GetTableRequest(const GetTableRequest& other921) { - dbName = other921.dbName; - tblName = other921.tblName; - capabilities = other921.capabilities; - __isset = other921.__isset; +GetTableRequest::GetTableRequest(const GetTableRequest& other939) { + dbName = other939.dbName; + tblName = other939.tblName; + capabilities = other939.capabilities; + catName = other939.catName; + __isset = other939.__isset; } -GetTableRequest& 
GetTableRequest::operator=(const GetTableRequest& other922) { - dbName = other922.dbName; - tblName = other922.tblName; - capabilities = other922.capabilities; - __isset = other922.__isset; +GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other940) { + dbName = other940.dbName; + tblName = other940.tblName; + capabilities = other940.capabilities; + catName = other940.catName; + __isset = other940.__isset; return *this; } void GetTableRequest::printTo(std::ostream& out) const { @@ -22291,6 +23562,7 @@ void GetTableRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblName=" << to_string(tblName); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -22366,11 +23638,11 @@ void swap(GetTableResult &a, GetTableResult &b) { swap(a.table, b.table); } -GetTableResult::GetTableResult(const GetTableResult& other923) { - table = other923.table; +GetTableResult::GetTableResult(const GetTableResult& other941) { + table = other941.table; } -GetTableResult& GetTableResult::operator=(const GetTableResult& other924) { - table = other924.table; +GetTableResult& GetTableResult::operator=(const GetTableResult& other942) { + table = other942.table; return *this; } void GetTableResult::printTo(std::ostream& out) const { @@ -22399,6 +23671,11 @@ void GetTablesRequest::__set_capabilities(const ClientCapabilities& val) { __isset.capabilities = true; } +void GetTablesRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -22433,14 +23710,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblNames.clear(); - uint32_t _size925; - ::apache::thrift::protocol::TType _etype928; - xfer += iprot->readListBegin(_etype928, _size925); - this->tblNames.resize(_size925); - uint32_t _i929; - for (_i929 = 0; _i929 < _size925; ++_i929) + uint32_t _size943; + ::apache::thrift::protocol::TType _etype946; + xfer += iprot->readListBegin(_etype946, _size943); + this->tblNames.resize(_size943); + uint32_t _i947; + for (_i947 = 0; _i947 < _size943; ++_i947) { - xfer += iprot->readString(this->tblNames[_i929]); + xfer += iprot->readString(this->tblNames[_i947]); } xfer += iprot->readListEnd(); } @@ -22457,6 +23734,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22484,10 +23769,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tblNames.size())); - std::vector ::const_iterator _iter930; - for (_iter930 = this->tblNames.begin(); _iter930 != this->tblNames.end(); ++_iter930) + std::vector ::const_iterator _iter948; + for (_iter948 = this->tblNames.begin(); _iter948 != this->tblNames.end(); ++_iter948) { - xfer += 
oprot->writeString((*_iter930)); + xfer += oprot->writeString((*_iter948)); } xfer += oprot->writeListEnd(); } @@ -22498,6 +23783,11 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += this->capabilities.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -22508,20 +23798,23 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) { swap(a.dbName, b.dbName); swap(a.tblNames, b.tblNames); swap(a.capabilities, b.capabilities); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other931) { - dbName = other931.dbName; - tblNames = other931.tblNames; - capabilities = other931.capabilities; - __isset = other931.__isset; +GetTablesRequest::GetTablesRequest(const GetTablesRequest& other949) { + dbName = other949.dbName; + tblNames = other949.tblNames; + capabilities = other949.capabilities; + catName = other949.catName; + __isset = other949.__isset; } -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other932) { - dbName = other932.dbName; - tblNames = other932.tblNames; - capabilities = other932.capabilities; - __isset = other932.__isset; +GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other950) { + dbName = other950.dbName; + tblNames = other950.tblNames; + capabilities = other950.capabilities; + catName = other950.catName; + __isset = other950.__isset; return *this; } void GetTablesRequest::printTo(std::ostream& out) const { @@ -22530,6 +23823,7 @@ void GetTablesRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "")); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -22568,14 +23862,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tables.clear(); - uint32_t _size933; - ::apache::thrift::protocol::TType _etype936; - xfer += iprot->readListBegin(_etype936, _size933); - this->tables.resize(_size933); - uint32_t _i937; - for (_i937 = 0; _i937 < _size933; ++_i937) + uint32_t _size951; + ::apache::thrift::protocol::TType _etype954; + xfer += iprot->readListBegin(_etype954, _size951); + this->tables.resize(_size951); + uint32_t _i955; + for (_i955 = 0; _i955 < _size951; ++_i955) { - xfer += this->tables[_i937].read(iprot); + xfer += this->tables[_i955].read(iprot); } xfer += iprot->readListEnd(); } @@ -22606,10 +23900,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tables.size())); - std::vector
<Table>::const_iterator _iter938; - for (_iter938 = this->tables.begin(); _iter938 != this->tables.end(); ++_iter938) + std::vector<Table>
::const_iterator _iter956; + for (_iter956 = this->tables.begin(); _iter956 != this->tables.end(); ++_iter956) { - xfer += (*_iter938).write(oprot); + xfer += (*_iter956).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22625,11 +23919,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) { swap(a.tables, b.tables); } -GetTablesResult::GetTablesResult(const GetTablesResult& other939) { - tables = other939.tables; +GetTablesResult::GetTablesResult(const GetTablesResult& other957) { + tables = other957.tables; } -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other940) { - tables = other940.tables; +GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other958) { + tables = other958.tables; return *this; } void GetTablesResult::printTo(std::ostream& out) const { @@ -22731,13 +24025,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) { swap(a.purge, b.purge); } -CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other941) { - dataPath = other941.dataPath; - purge = other941.purge; +CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other959) { + dataPath = other959.dataPath; + purge = other959.purge; } -CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other942) { - dataPath = other942.dataPath; - purge = other942.purge; +CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other960) { + dataPath = other960.dataPath; + purge = other960.purge; return *this; } void CmRecycleRequest::printTo(std::ostream& out) const { @@ -22797,11 +24091,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) { (void) b; } -CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other943) { - (void) other943; +CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other961) { + (void) other961; } -CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other944) { - (void) other944; +CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other962) { + (void) other962; return *this; } void CmRecycleResponse::printTo(std::ostream& out) const { @@ -22832,6 +24126,11 @@ void TableMeta::__set_comments(const std::string& val) { __isset.comments = true; } +void TableMeta::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -22888,6 +24187,14 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22928,6 +24235,11 @@ uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->comments); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -22939,22 +24251,25 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.tableName, b.tableName); swap(a.tableType, b.tableType); swap(a.comments, b.comments); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const 
TableMeta& other945) { - dbName = other945.dbName; - tableName = other945.tableName; - tableType = other945.tableType; - comments = other945.comments; - __isset = other945.__isset; +TableMeta::TableMeta(const TableMeta& other963) { + dbName = other963.dbName; + tableName = other963.tableName; + tableType = other963.tableType; + comments = other963.comments; + catName = other963.catName; + __isset = other963.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other946) { - dbName = other946.dbName; - tableName = other946.tableName; - tableType = other946.tableType; - comments = other946.comments; - __isset = other946.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other964) { + dbName = other964.dbName; + tableName = other964.tableName; + tableType = other964.tableType; + comments = other964.comments; + catName = other964.catName; + __isset = other964.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -22964,6 +24279,7 @@ void TableMeta::printTo(std::ostream& out) const { out << ", " << "tableName=" << to_string(tableName); out << ", " << "tableType=" << to_string(tableType); out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -23012,15 +24328,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size947; - ::apache::thrift::protocol::TType _etype950; - xfer += iprot->readSetBegin(_etype950, _size947); - uint32_t _i951; - for (_i951 = 0; _i951 < _size947; ++_i951) + uint32_t _size965; + ::apache::thrift::protocol::TType _etype968; + xfer += iprot->readSetBegin(_etype968, _size965); + uint32_t _i969; + for (_i969 = 0; _i969 < _size965; ++_i969) { - std::string _elem952; - xfer += iprot->readString(_elem952); - this->tablesUsed.insert(_elem952); + std::string _elem970; + xfer += iprot->readString(_elem970); + this->tablesUsed.insert(_elem970); } xfer += iprot->readSetEnd(); } @@ -23069,10 +24385,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter953; - for (_iter953 = this->tablesUsed.begin(); _iter953 != this->tablesUsed.end(); ++_iter953) + std::set ::const_iterator _iter971; + for (_iter971 = this->tablesUsed.begin(); _iter971 != this->tablesUsed.end(); ++_iter971) { - xfer += oprot->writeString((*_iter953)); + xfer += oprot->writeString((*_iter971)); } xfer += oprot->writeSetEnd(); } @@ -23100,17 +24416,17 @@ void swap(Materialization &a, Materialization &b) { swap(a.__isset, b.__isset); } -Materialization::Materialization(const Materialization& other954) { - tablesUsed = other954.tablesUsed; - validTxnList = other954.validTxnList; - invalidationTime = other954.invalidationTime; - __isset = other954.__isset; +Materialization::Materialization(const Materialization& other972) { + tablesUsed = other972.tablesUsed; + validTxnList = other972.validTxnList; + invalidationTime = other972.invalidationTime; + __isset = other972.__isset; } -Materialization& Materialization::operator=(const Materialization& other955) { - tablesUsed = other955.tablesUsed; - validTxnList = other955.validTxnList; - invalidationTime = 
other955.invalidationTime; - __isset = other955.__isset; +Materialization& Materialization::operator=(const Materialization& other973) { + tablesUsed = other973.tablesUsed; + validTxnList = other973.validTxnList; + invalidationTime = other973.invalidationTime; + __isset = other973.__isset; return *this; } void Materialization::printTo(std::ostream& out) const { @@ -23178,9 +24494,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast956; - xfer += iprot->readI32(ecast956); - this->status = (WMResourcePlanStatus::type)ecast956; + int32_t ecast974; + xfer += iprot->readI32(ecast974); + this->status = (WMResourcePlanStatus::type)ecast974; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -23254,19 +24570,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) { swap(a.__isset, b.__isset); } -WMResourcePlan::WMResourcePlan(const WMResourcePlan& other957) { - name = other957.name; - status = other957.status; - queryParallelism = other957.queryParallelism; - defaultPoolPath = other957.defaultPoolPath; - __isset = other957.__isset; +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other975) { + name = other975.name; + status = other975.status; + queryParallelism = other975.queryParallelism; + defaultPoolPath = other975.defaultPoolPath; + __isset = other975.__isset; } -WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other958) { - name = other958.name; - status = other958.status; - queryParallelism = other958.queryParallelism; - defaultPoolPath = other958.defaultPoolPath; - __isset = other958.__isset; +WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other976) { + name = other976.name; + status = other976.status; + queryParallelism = other976.queryParallelism; + defaultPoolPath = other976.defaultPoolPath; + __isset = other976.__isset; return *this; } void WMResourcePlan::printTo(std::ostream& out) const { @@ -23345,9 +24661,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast959; - xfer += iprot->readI32(ecast959); - this->status = (WMResourcePlanStatus::type)ecast959; + int32_t ecast977; + xfer += iprot->readI32(ecast977); + this->status = (WMResourcePlanStatus::type)ecast977; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -23449,23 +24765,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) { swap(a.__isset, b.__isset); } -WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other960) { - name = other960.name; - status = other960.status; - queryParallelism = other960.queryParallelism; - isSetQueryParallelism = other960.isSetQueryParallelism; - defaultPoolPath = other960.defaultPoolPath; - isSetDefaultPoolPath = other960.isSetDefaultPoolPath; - __isset = other960.__isset; -} -WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other961) { - name = other961.name; - status = other961.status; - queryParallelism = other961.queryParallelism; - isSetQueryParallelism = other961.isSetQueryParallelism; - defaultPoolPath = other961.defaultPoolPath; - isSetDefaultPoolPath = other961.isSetDefaultPoolPath; - __isset = other961.__isset; +WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other978) { + name = other978.name; + status = other978.status; + queryParallelism = other978.queryParallelism; + 
isSetQueryParallelism = other978.isSetQueryParallelism; + defaultPoolPath = other978.defaultPoolPath; + isSetDefaultPoolPath = other978.isSetDefaultPoolPath; + __isset = other978.__isset; +} +WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other979) { + name = other979.name; + status = other979.status; + queryParallelism = other979.queryParallelism; + isSetQueryParallelism = other979.isSetQueryParallelism; + defaultPoolPath = other979.defaultPoolPath; + isSetDefaultPoolPath = other979.isSetDefaultPoolPath; + __isset = other979.__isset; return *this; } void WMNullableResourcePlan::printTo(std::ostream& out) const { @@ -23630,21 +24946,21 @@ void swap(WMPool &a, WMPool &b) { swap(a.__isset, b.__isset); } -WMPool::WMPool(const WMPool& other962) { - resourcePlanName = other962.resourcePlanName; - poolPath = other962.poolPath; - allocFraction = other962.allocFraction; - queryParallelism = other962.queryParallelism; - schedulingPolicy = other962.schedulingPolicy; - __isset = other962.__isset; -} -WMPool& WMPool::operator=(const WMPool& other963) { - resourcePlanName = other963.resourcePlanName; - poolPath = other963.poolPath; - allocFraction = other963.allocFraction; - queryParallelism = other963.queryParallelism; - schedulingPolicy = other963.schedulingPolicy; - __isset = other963.__isset; +WMPool::WMPool(const WMPool& other980) { + resourcePlanName = other980.resourcePlanName; + poolPath = other980.poolPath; + allocFraction = other980.allocFraction; + queryParallelism = other980.queryParallelism; + schedulingPolicy = other980.schedulingPolicy; + __isset = other980.__isset; +} +WMPool& WMPool::operator=(const WMPool& other981) { + resourcePlanName = other981.resourcePlanName; + poolPath = other981.poolPath; + allocFraction = other981.allocFraction; + queryParallelism = other981.queryParallelism; + schedulingPolicy = other981.schedulingPolicy; + __isset = other981.__isset; return *this; } void WMPool::printTo(std::ostream& out) const { @@ -23827,23 +25143,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) { swap(a.__isset, b.__isset); } -WMNullablePool::WMNullablePool(const WMNullablePool& other964) { - resourcePlanName = other964.resourcePlanName; - poolPath = other964.poolPath; - allocFraction = other964.allocFraction; - queryParallelism = other964.queryParallelism; - schedulingPolicy = other964.schedulingPolicy; - isSetSchedulingPolicy = other964.isSetSchedulingPolicy; - __isset = other964.__isset; -} -WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other965) { - resourcePlanName = other965.resourcePlanName; - poolPath = other965.poolPath; - allocFraction = other965.allocFraction; - queryParallelism = other965.queryParallelism; - schedulingPolicy = other965.schedulingPolicy; - isSetSchedulingPolicy = other965.isSetSchedulingPolicy; - __isset = other965.__isset; +WMNullablePool::WMNullablePool(const WMNullablePool& other982) { + resourcePlanName = other982.resourcePlanName; + poolPath = other982.poolPath; + allocFraction = other982.allocFraction; + queryParallelism = other982.queryParallelism; + schedulingPolicy = other982.schedulingPolicy; + isSetSchedulingPolicy = other982.isSetSchedulingPolicy; + __isset = other982.__isset; +} +WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other983) { + resourcePlanName = other983.resourcePlanName; + poolPath = other983.poolPath; + allocFraction = other983.allocFraction; + queryParallelism = other983.queryParallelism; + schedulingPolicy = other983.schedulingPolicy; + 
isSetSchedulingPolicy = other983.isSetSchedulingPolicy; + __isset = other983.__isset; return *this; } void WMNullablePool::printTo(std::ostream& out) const { @@ -24008,21 +25324,21 @@ void swap(WMTrigger &a, WMTrigger &b) { swap(a.__isset, b.__isset); } -WMTrigger::WMTrigger(const WMTrigger& other966) { - resourcePlanName = other966.resourcePlanName; - triggerName = other966.triggerName; - triggerExpression = other966.triggerExpression; - actionExpression = other966.actionExpression; - isInUnmanaged = other966.isInUnmanaged; - __isset = other966.__isset; -} -WMTrigger& WMTrigger::operator=(const WMTrigger& other967) { - resourcePlanName = other967.resourcePlanName; - triggerName = other967.triggerName; - triggerExpression = other967.triggerExpression; - actionExpression = other967.actionExpression; - isInUnmanaged = other967.isInUnmanaged; - __isset = other967.__isset; +WMTrigger::WMTrigger(const WMTrigger& other984) { + resourcePlanName = other984.resourcePlanName; + triggerName = other984.triggerName; + triggerExpression = other984.triggerExpression; + actionExpression = other984.actionExpression; + isInUnmanaged = other984.isInUnmanaged; + __isset = other984.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other985) { + resourcePlanName = other985.resourcePlanName; + triggerName = other985.triggerName; + triggerExpression = other985.triggerExpression; + actionExpression = other985.actionExpression; + isInUnmanaged = other985.isInUnmanaged; + __isset = other985.__isset; return *this; } void WMTrigger::printTo(std::ostream& out) const { @@ -24187,21 +25503,21 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.__isset, b.__isset); } -WMMapping::WMMapping(const WMMapping& other968) { - resourcePlanName = other968.resourcePlanName; - entityType = other968.entityType; - entityName = other968.entityName; - poolPath = other968.poolPath; - ordering = other968.ordering; - __isset = other968.__isset; -} -WMMapping& WMMapping::operator=(const WMMapping& other969) { - resourcePlanName = other969.resourcePlanName; - entityType = other969.entityType; - entityName = other969.entityName; - poolPath = other969.poolPath; - ordering = other969.ordering; - __isset = other969.__isset; +WMMapping::WMMapping(const WMMapping& other986) { + resourcePlanName = other986.resourcePlanName; + entityType = other986.entityType; + entityName = other986.entityName; + poolPath = other986.poolPath; + ordering = other986.ordering; + __isset = other986.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other987) { + resourcePlanName = other987.resourcePlanName; + entityType = other987.entityType; + entityName = other987.entityName; + poolPath = other987.poolPath; + ordering = other987.ordering; + __isset = other987.__isset; return *this; } void WMMapping::printTo(std::ostream& out) const { @@ -24307,13 +25623,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) { swap(a.trigger, b.trigger); } -WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other970) { - pool = other970.pool; - trigger = other970.trigger; +WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other988) { + pool = other988.pool; + trigger = other988.trigger; } -WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other971) { - pool = other971.pool; - trigger = other971.trigger; +WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other989) { + pool = other989.pool; + trigger = other989.trigger; return *this; } void WMPoolTrigger::printTo(std::ostream& out) const { @@ -24387,14 +25703,14 @@ uint32_t 
WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->pools.clear(); - uint32_t _size972; - ::apache::thrift::protocol::TType _etype975; - xfer += iprot->readListBegin(_etype975, _size972); - this->pools.resize(_size972); - uint32_t _i976; - for (_i976 = 0; _i976 < _size972; ++_i976) + uint32_t _size990; + ::apache::thrift::protocol::TType _etype993; + xfer += iprot->readListBegin(_etype993, _size990); + this->pools.resize(_size990); + uint32_t _i994; + for (_i994 = 0; _i994 < _size990; ++_i994) { - xfer += this->pools[_i976].read(iprot); + xfer += this->pools[_i994].read(iprot); } xfer += iprot->readListEnd(); } @@ -24407,14 +25723,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->mappings.clear(); - uint32_t _size977; - ::apache::thrift::protocol::TType _etype980; - xfer += iprot->readListBegin(_etype980, _size977); - this->mappings.resize(_size977); - uint32_t _i981; - for (_i981 = 0; _i981 < _size977; ++_i981) + uint32_t _size995; + ::apache::thrift::protocol::TType _etype998; + xfer += iprot->readListBegin(_etype998, _size995); + this->mappings.resize(_size995); + uint32_t _i999; + for (_i999 = 0; _i999 < _size995; ++_i999) { - xfer += this->mappings[_i981].read(iprot); + xfer += this->mappings[_i999].read(iprot); } xfer += iprot->readListEnd(); } @@ -24427,14 +25743,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size982; - ::apache::thrift::protocol::TType _etype985; - xfer += iprot->readListBegin(_etype985, _size982); - this->triggers.resize(_size982); - uint32_t _i986; - for (_i986 = 0; _i986 < _size982; ++_i986) + uint32_t _size1000; + ::apache::thrift::protocol::TType _etype1003; + xfer += iprot->readListBegin(_etype1003, _size1000); + this->triggers.resize(_size1000); + uint32_t _i1004; + for (_i1004 = 0; _i1004 < _size1000; ++_i1004) { - xfer += this->triggers[_i986].read(iprot); + xfer += this->triggers[_i1004].read(iprot); } xfer += iprot->readListEnd(); } @@ -24447,14 +25763,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->poolTriggers.clear(); - uint32_t _size987; - ::apache::thrift::protocol::TType _etype990; - xfer += iprot->readListBegin(_etype990, _size987); - this->poolTriggers.resize(_size987); - uint32_t _i991; - for (_i991 = 0; _i991 < _size987; ++_i991) + uint32_t _size1005; + ::apache::thrift::protocol::TType _etype1008; + xfer += iprot->readListBegin(_etype1008, _size1005); + this->poolTriggers.resize(_size1005); + uint32_t _i1009; + for (_i1009 = 0; _i1009 < _size1005; ++_i1009) { - xfer += this->poolTriggers[_i991].read(iprot); + xfer += this->poolTriggers[_i1009].read(iprot); } xfer += iprot->readListEnd(); } @@ -24491,10 +25807,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->pools.size())); - std::vector ::const_iterator _iter992; - for (_iter992 = this->pools.begin(); _iter992 != this->pools.end(); ++_iter992) + std::vector ::const_iterator _iter1010; + for (_iter1010 = this->pools.begin(); _iter1010 != this->pools.end(); ++_iter1010) { - xfer += 
(*_iter992).write(oprot); + xfer += (*_iter1010).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24504,10 +25820,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->mappings.size())); - std::vector ::const_iterator _iter993; - for (_iter993 = this->mappings.begin(); _iter993 != this->mappings.end(); ++_iter993) + std::vector ::const_iterator _iter1011; + for (_iter1011 = this->mappings.begin(); _iter1011 != this->mappings.end(); ++_iter1011) { - xfer += (*_iter993).write(oprot); + xfer += (*_iter1011).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24517,10 +25833,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter994; - for (_iter994 = this->triggers.begin(); _iter994 != this->triggers.end(); ++_iter994) + std::vector ::const_iterator _iter1012; + for (_iter1012 = this->triggers.begin(); _iter1012 != this->triggers.end(); ++_iter1012) { - xfer += (*_iter994).write(oprot); + xfer += (*_iter1012).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24530,10 +25846,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->poolTriggers.size())); - std::vector ::const_iterator _iter995; - for (_iter995 = this->poolTriggers.begin(); _iter995 != this->poolTriggers.end(); ++_iter995) + std::vector ::const_iterator _iter1013; + for (_iter1013 = this->poolTriggers.begin(); _iter1013 != this->poolTriggers.end(); ++_iter1013) { - xfer += (*_iter995).write(oprot); + xfer += (*_iter1013).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24554,21 +25870,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) { swap(a.__isset, b.__isset); } -WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other996) { - plan = other996.plan; - pools = other996.pools; - mappings = other996.mappings; - triggers = other996.triggers; - poolTriggers = other996.poolTriggers; - __isset = other996.__isset; -} -WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other997) { - plan = other997.plan; - pools = other997.pools; - mappings = other997.mappings; - triggers = other997.triggers; - poolTriggers = other997.poolTriggers; - __isset = other997.__isset; +WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other1014) { + plan = other1014.plan; + pools = other1014.pools; + mappings = other1014.mappings; + triggers = other1014.triggers; + poolTriggers = other1014.poolTriggers; + __isset = other1014.__isset; +} +WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other1015) { + plan = other1015.plan; + pools = other1015.pools; + mappings = other1015.mappings; + triggers = other1015.triggers; + poolTriggers = other1015.poolTriggers; + __isset = other1015.__isset; return *this; } void WMFullResourcePlan::printTo(std::ostream& out) const { @@ -24673,15 +25989,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } 
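
The WM* hunks here and below change nothing but the Thrift compiler's sequence numbers for generated temporaries (_size972 becomes _size990, other996 becomes other1014, and so on); the compiler numbers temporaries globally, so the new catalog structs and fields earlier in the IDL shift every later counter. The serialization logic itself is untouched. Schematically, each of these list-reading loops is an instance of the following pattern (a sketch; the helper name readStructList is assumed for illustration):

#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

// Generic form of the generated code: read the list header, size the
// destination vector, do element-wise struct reads, then close the list.
template <typename Elem>
uint32_t readStructList(::apache::thrift::protocol::TProtocol* iprot,
                        std::vector<Elem>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType etype;
  xfer += iprot->readListBegin(etype, size);  // element type and count from the wire
  out.clear();
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i) {
    xfer += out[i].read(iprot);               // e.g. WMPool, WMMapping, WMTrigger
  }
  xfer += iprot->readListEnd();
  return xfer;
}
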
-WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other998) { - resourcePlan = other998.resourcePlan; - copyFrom = other998.copyFrom; - __isset = other998.__isset; +WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other1016) { + resourcePlan = other1016.resourcePlan; + copyFrom = other1016.copyFrom; + __isset = other1016.__isset; } -WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other999) { - resourcePlan = other999.resourcePlan; - copyFrom = other999.copyFrom; - __isset = other999.__isset; +WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other1017) { + resourcePlan = other1017.resourcePlan; + copyFrom = other1017.copyFrom; + __isset = other1017.__isset; return *this; } void WMCreateResourcePlanRequest::printTo(std::ostream& out) const { @@ -24741,11 +26057,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) { (void) b; } -WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1000) { - (void) other1000; +WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1018) { + (void) other1018; } -WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1001) { - (void) other1001; +WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1019) { + (void) other1019; return *this; } void WMCreateResourcePlanResponse::printTo(std::ostream& out) const { @@ -24803,11 +26119,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b) (void) b; } -WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1002) { - (void) other1002; +WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1020) { + (void) other1020; } -WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1003) { - (void) other1003; +WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1021) { + (void) other1021; return *this; } void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const { @@ -24888,13 +26204,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b swap(a.__isset, b.__isset); } -WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1004) { - resourcePlan = other1004.resourcePlan; - __isset = other1004.__isset; +WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1022) { + resourcePlan = other1022.resourcePlan; + __isset = other1022.__isset; } -WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1005) { - resourcePlan = other1005.resourcePlan; - __isset = other1005.__isset; +WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1023) { + resourcePlan = other1023.resourcePlan; + __isset = other1023.__isset; return *this; } void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const { @@ -24976,13 +26292,13 @@ void swap(WMGetResourcePlanRequest &a, 
WMGetResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1006) { - resourcePlanName = other1006.resourcePlanName; - __isset = other1006.__isset; +WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1024) { + resourcePlanName = other1024.resourcePlanName; + __isset = other1024.__isset; } -WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1007) { - resourcePlanName = other1007.resourcePlanName; - __isset = other1007.__isset; +WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1025) { + resourcePlanName = other1025.resourcePlanName; + __isset = other1025.__isset; return *this; } void WMGetResourcePlanRequest::printTo(std::ostream& out) const { @@ -25064,13 +26380,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other1008) { - resourcePlan = other1008.resourcePlan; - __isset = other1008.__isset; +WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other1026) { + resourcePlan = other1026.resourcePlan; + __isset = other1026.__isset; } -WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1009) { - resourcePlan = other1009.resourcePlan; - __isset = other1009.__isset; +WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1027) { + resourcePlan = other1027.resourcePlan; + __isset = other1027.__isset; return *this; } void WMGetResourcePlanResponse::printTo(std::ostream& out) const { @@ -25129,11 +26445,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) { (void) b; } -WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1010) { - (void) other1010; +WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1028) { + (void) other1028; } -WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1011) { - (void) other1011; +WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1029) { + (void) other1029; return *this; } void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const { @@ -25177,14 +26493,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourcePlans.clear(); - uint32_t _size1012; - ::apache::thrift::protocol::TType _etype1015; - xfer += iprot->readListBegin(_etype1015, _size1012); - this->resourcePlans.resize(_size1012); - uint32_t _i1016; - for (_i1016 = 0; _i1016 < _size1012; ++_i1016) + uint32_t _size1030; + ::apache::thrift::protocol::TType _etype1033; + xfer += iprot->readListBegin(_etype1033, _size1030); + this->resourcePlans.resize(_size1030); + uint32_t _i1034; + for (_i1034 = 0; _i1034 < _size1030; ++_i1034) { - xfer += this->resourcePlans[_i1016].read(iprot); + xfer += this->resourcePlans[_i1034].read(iprot); } xfer += iprot->readListEnd(); } @@ -25214,10 +26530,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourcePlans.size())); - std::vector ::const_iterator _iter1017; - for (_iter1017 = this->resourcePlans.begin(); _iter1017 != this->resourcePlans.end(); ++_iter1017) + std::vector ::const_iterator _iter1035; + for (_iter1035 = this->resourcePlans.begin(); _iter1035 != this->resourcePlans.end(); ++_iter1035) { - xfer += (*_iter1017).write(oprot); + xfer += (*_iter1035).write(oprot); } xfer += oprot->writeListEnd(); } @@ -25234,13 +26550,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1018) { - resourcePlans = other1018.resourcePlans; - __isset = other1018.__isset; +WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1036) { + resourcePlans = other1036.resourcePlans; + __isset = other1036.__isset; } -WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1019) { - resourcePlans = other1019.resourcePlans; - __isset = other1019.__isset; +WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1037) { + resourcePlans = other1037.resourcePlans; + __isset = other1037.__isset; return *this; } void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const { @@ -25398,21 +26714,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1020) { - resourcePlanName = other1020.resourcePlanName; - resourcePlan = other1020.resourcePlan; - isEnableAndActivate = other1020.isEnableAndActivate; - isForceDeactivate = other1020.isForceDeactivate; - isReplace = other1020.isReplace; - __isset = other1020.__isset; -} -WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1021) { - resourcePlanName = other1021.resourcePlanName; - resourcePlan = other1021.resourcePlan; - isEnableAndActivate = other1021.isEnableAndActivate; - isForceDeactivate = other1021.isForceDeactivate; - isReplace = other1021.isReplace; - __isset = other1021.__isset; +WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1038) { + resourcePlanName = other1038.resourcePlanName; + resourcePlan = other1038.resourcePlan; + isEnableAndActivate = other1038.isEnableAndActivate; + isForceDeactivate = other1038.isForceDeactivate; + isReplace = other1038.isReplace; + __isset = other1038.__isset; +} +WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1039) { + resourcePlanName = other1039.resourcePlanName; + resourcePlan = other1039.resourcePlan; + isEnableAndActivate = other1039.isEnableAndActivate; + isForceDeactivate = other1039.isForceDeactivate; + isReplace = other1039.isReplace; + __isset = other1039.__isset; return *this; } void WMAlterResourcePlanRequest::printTo(std::ostream& out) const { @@ -25498,13 +26814,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1022) { - fullResourcePlan = other1022.fullResourcePlan; - __isset = other1022.__isset; 
+WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1040) { + fullResourcePlan = other1040.fullResourcePlan; + __isset = other1040.__isset; } -WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1023) { - fullResourcePlan = other1023.fullResourcePlan; - __isset = other1023.__isset; +WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1041) { + fullResourcePlan = other1041.fullResourcePlan; + __isset = other1041.__isset; return *this; } void WMAlterResourcePlanResponse::printTo(std::ostream& out) const { @@ -25586,13 +26902,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1024) { - resourcePlanName = other1024.resourcePlanName; - __isset = other1024.__isset; +WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1042) { + resourcePlanName = other1042.resourcePlanName; + __isset = other1042.__isset; } -WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1025) { - resourcePlanName = other1025.resourcePlanName; - __isset = other1025.__isset; +WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1043) { + resourcePlanName = other1043.resourcePlanName; + __isset = other1043.__isset; return *this; } void WMValidateResourcePlanRequest::printTo(std::ostream& out) const { @@ -25642,14 +26958,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->errors.clear(); - uint32_t _size1026; - ::apache::thrift::protocol::TType _etype1029; - xfer += iprot->readListBegin(_etype1029, _size1026); - this->errors.resize(_size1026); - uint32_t _i1030; - for (_i1030 = 0; _i1030 < _size1026; ++_i1030) + uint32_t _size1044; + ::apache::thrift::protocol::TType _etype1047; + xfer += iprot->readListBegin(_etype1047, _size1044); + this->errors.resize(_size1044); + uint32_t _i1048; + for (_i1048 = 0; _i1048 < _size1044; ++_i1048) { - xfer += iprot->readString(this->errors[_i1030]); + xfer += iprot->readString(this->errors[_i1048]); } xfer += iprot->readListEnd(); } @@ -25662,14 +26978,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->warnings.clear(); - uint32_t _size1031; - ::apache::thrift::protocol::TType _etype1034; - xfer += iprot->readListBegin(_etype1034, _size1031); - this->warnings.resize(_size1031); - uint32_t _i1035; - for (_i1035 = 0; _i1035 < _size1031; ++_i1035) + uint32_t _size1049; + ::apache::thrift::protocol::TType _etype1052; + xfer += iprot->readListBegin(_etype1052, _size1049); + this->warnings.resize(_size1049); + uint32_t _i1053; + for (_i1053 = 0; _i1053 < _size1049; ++_i1053) { - xfer += iprot->readString(this->warnings[_i1035]); + xfer += iprot->readString(this->warnings[_i1053]); } xfer += iprot->readListEnd(); } @@ -25699,10 +27015,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->errors.size())); - std::vector 
::const_iterator _iter1036; - for (_iter1036 = this->errors.begin(); _iter1036 != this->errors.end(); ++_iter1036) + std::vector ::const_iterator _iter1054; + for (_iter1054 = this->errors.begin(); _iter1054 != this->errors.end(); ++_iter1054) { - xfer += oprot->writeString((*_iter1036)); + xfer += oprot->writeString((*_iter1054)); } xfer += oprot->writeListEnd(); } @@ -25712,10 +27028,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->warnings.size())); - std::vector ::const_iterator _iter1037; - for (_iter1037 = this->warnings.begin(); _iter1037 != this->warnings.end(); ++_iter1037) + std::vector ::const_iterator _iter1055; + for (_iter1055 = this->warnings.begin(); _iter1055 != this->warnings.end(); ++_iter1055) { - xfer += oprot->writeString((*_iter1037)); + xfer += oprot->writeString((*_iter1055)); } xfer += oprot->writeListEnd(); } @@ -25733,15 +27049,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b) swap(a.__isset, b.__isset); } -WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1038) { - errors = other1038.errors; - warnings = other1038.warnings; - __isset = other1038.__isset; +WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1056) { + errors = other1056.errors; + warnings = other1056.warnings; + __isset = other1056.__isset; } -WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1039) { - errors = other1039.errors; - warnings = other1039.warnings; - __isset = other1039.__isset; +WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1057) { + errors = other1057.errors; + warnings = other1057.warnings; + __isset = other1057.__isset; return *this; } void WMValidateResourcePlanResponse::printTo(std::ostream& out) const { @@ -25824,13 +27140,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1040) { - resourcePlanName = other1040.resourcePlanName; - __isset = other1040.__isset; +WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1058) { + resourcePlanName = other1058.resourcePlanName; + __isset = other1058.__isset; } -WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1041) { - resourcePlanName = other1041.resourcePlanName; - __isset = other1041.__isset; +WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1059) { + resourcePlanName = other1059.resourcePlanName; + __isset = other1059.__isset; return *this; } void WMDropResourcePlanRequest::printTo(std::ostream& out) const { @@ -25889,11 +27205,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) { (void) b; } -WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1042) { - (void) other1042; +WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1060) { + (void) other1060; } -WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& 
other1043) { - (void) other1043; +WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1061) { + (void) other1061; return *this; } void WMDropResourcePlanResponse::printTo(std::ostream& out) const { @@ -25974,13 +27290,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1044) { - trigger = other1044.trigger; - __isset = other1044.__isset; +WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1062) { + trigger = other1062.trigger; + __isset = other1062.__isset; } -WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1045) { - trigger = other1045.trigger; - __isset = other1045.__isset; +WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1063) { + trigger = other1063.trigger; + __isset = other1063.__isset; return *this; } void WMCreateTriggerRequest::printTo(std::ostream& out) const { @@ -26039,11 +27355,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) { (void) b; } -WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1046) { - (void) other1046; +WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1064) { + (void) other1064; } -WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1047) { - (void) other1047; +WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1065) { + (void) other1065; return *this; } void WMCreateTriggerResponse::printTo(std::ostream& out) const { @@ -26124,13 +27440,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1048) { - trigger = other1048.trigger; - __isset = other1048.__isset; +WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1066) { + trigger = other1066.trigger; + __isset = other1066.__isset; } -WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1049) { - trigger = other1049.trigger; - __isset = other1049.__isset; +WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1067) { + trigger = other1067.trigger; + __isset = other1067.__isset; return *this; } void WMAlterTriggerRequest::printTo(std::ostream& out) const { @@ -26189,11 +27505,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) { (void) b; } -WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1050) { - (void) other1050; +WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1068) { + (void) other1068; } -WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1051) { - (void) other1051; +WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1069) { + (void) other1069; return *this; } void WMAlterTriggerResponse::printTo(std::ostream& out) const { @@ -26293,15 +27609,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1052) { - resourcePlanName = other1052.resourcePlanName; - triggerName = other1052.triggerName; - __isset = other1052.__isset; 
+WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1070) { + resourcePlanName = other1070.resourcePlanName; + triggerName = other1070.triggerName; + __isset = other1070.__isset; } -WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1053) { - resourcePlanName = other1053.resourcePlanName; - triggerName = other1053.triggerName; - __isset = other1053.__isset; +WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1071) { + resourcePlanName = other1071.resourcePlanName; + triggerName = other1071.triggerName; + __isset = other1071.__isset; return *this; } void WMDropTriggerRequest::printTo(std::ostream& out) const { @@ -26361,11 +27677,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) { (void) b; } -WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1054) { - (void) other1054; +WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1072) { + (void) other1072; } -WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1055) { - (void) other1055; +WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1073) { + (void) other1073; return *this; } void WMDropTriggerResponse::printTo(std::ostream& out) const { @@ -26446,13 +27762,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1056) { - resourcePlanName = other1056.resourcePlanName; - __isset = other1056.__isset; +WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1074) { + resourcePlanName = other1074.resourcePlanName; + __isset = other1074.__isset; } -WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1057) { - resourcePlanName = other1057.resourcePlanName; - __isset = other1057.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1075) { + resourcePlanName = other1075.resourcePlanName; + __isset = other1075.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -26497,14 +27813,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size1058; - ::apache::thrift::protocol::TType _etype1061; - xfer += iprot->readListBegin(_etype1061, _size1058); - this->triggers.resize(_size1058); - uint32_t _i1062; - for (_i1062 = 0; _i1062 < _size1058; ++_i1062) + uint32_t _size1076; + ::apache::thrift::protocol::TType _etype1079; + xfer += iprot->readListBegin(_etype1079, _size1076); + this->triggers.resize(_size1076); + uint32_t _i1080; + for (_i1080 = 0; _i1080 < _size1076; ++_i1080) { - xfer += this->triggers[_i1062].read(iprot); + xfer += this->triggers[_i1080].read(iprot); } xfer += iprot->readListEnd(); } @@ -26534,10 +27850,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->triggers.size())); - std::vector ::const_iterator _iter1063; - for 
(_iter1063 = this->triggers.begin(); _iter1063 != this->triggers.end(); ++_iter1063) + std::vector ::const_iterator _iter1081; + for (_iter1081 = this->triggers.begin(); _iter1081 != this->triggers.end(); ++_iter1081) { - xfer += (*_iter1063).write(oprot); + xfer += (*_iter1081).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26554,13 +27870,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1064) { - triggers = other1064.triggers; - __isset = other1064.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1082) { + triggers = other1082.triggers; + __isset = other1082.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1065) { - triggers = other1065.triggers; - __isset = other1065.__isset; +WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1083) { + triggers = other1083.triggers; + __isset = other1083.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -26642,13 +27958,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1066) { - pool = other1066.pool; - __isset = other1066.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1084) { + pool = other1084.pool; + __isset = other1084.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1067) { - pool = other1067.pool; - __isset = other1067.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1085) { + pool = other1085.pool; + __isset = other1085.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -26707,11 +28023,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } -WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1068) { - (void) other1068; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1086) { + (void) other1086; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1069) { - (void) other1069; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1087) { + (void) other1087; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -26811,15 +28127,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1070) { - pool = other1070.pool; - poolPath = other1070.poolPath; - __isset = other1070.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1088) { + pool = other1088.pool; + poolPath = other1088.poolPath; + __isset = other1088.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1071) { - pool = other1071.pool; - poolPath = other1071.poolPath; - __isset = other1071.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1089) { + pool = other1089.pool; + poolPath = other1089.poolPath; + __isset = 
other1089.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -26879,11 +28195,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } -WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1072) { - (void) other1072; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1090) { + (void) other1090; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1073) { - (void) other1073; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1091) { + (void) other1091; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const { @@ -26983,15 +28299,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1074) { - resourcePlanName = other1074.resourcePlanName; - poolPath = other1074.poolPath; - __isset = other1074.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1092) { + resourcePlanName = other1092.resourcePlanName; + poolPath = other1092.poolPath; + __isset = other1092.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1075) { - resourcePlanName = other1075.resourcePlanName; - poolPath = other1075.poolPath; - __isset = other1075.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1093) { + resourcePlanName = other1093.resourcePlanName; + poolPath = other1093.poolPath; + __isset = other1093.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -27051,11 +28367,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1076) { - (void) other1076; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1094) { + (void) other1094; } -WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1077) { - (void) other1077; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1095) { + (void) other1095; return *this; } void WMDropPoolResponse::printTo(std::ostream& out) const { @@ -27155,15 +28471,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1078) { - mapping = other1078.mapping; - update = other1078.update; - __isset = other1078.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1096) { + mapping = other1096.mapping; + update = other1096.update; + __isset = other1096.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1079) { - mapping = other1079.mapping; - update = other1079.update; - __isset = other1079.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1097) { + mapping = other1097.mapping; + update = other1097.update; + __isset = other1097.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -27223,11 +28539,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const 
WMCreateOrUpdateMappingResponse& other1080) { - (void) other1080; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1098) { + (void) other1098; } -WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1081) { - (void) other1081; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1099) { + (void) other1099; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -27308,13 +28624,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1082) { - mapping = other1082.mapping; - __isset = other1082.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1100) { + mapping = other1100.mapping; + __isset = other1100.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1083) { - mapping = other1083.mapping; - __isset = other1083.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1101) { + mapping = other1101.mapping; + __isset = other1101.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -27373,11 +28689,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1084) { - (void) other1084; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1102) { + (void) other1102; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1085) { - (void) other1085; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1103) { + (void) other1103; return *this; } void WMDropMappingResponse::printTo(std::ostream& out) const { @@ -27515,19 +28831,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } -WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1086) { - resourcePlanName = other1086.resourcePlanName; - triggerName = other1086.triggerName; - poolPath = other1086.poolPath; - drop = other1086.drop; - __isset = other1086.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1104) { + resourcePlanName = other1104.resourcePlanName; + triggerName = other1104.triggerName; + poolPath = other1104.poolPath; + drop = other1104.drop; + __isset = other1104.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1087) { - resourcePlanName = other1087.resourcePlanName; - triggerName = other1087.triggerName; - poolPath = other1087.poolPath; - drop = other1087.drop; - __isset = other1087.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1105) { + resourcePlanName = other1105.resourcePlanName; + triggerName = other1105.triggerName; + poolPath = other1105.poolPath; + drop = other1105.drop; + __isset = other1105.__isset; return *this; } void 
WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -27589,11 +28905,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } -WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1088) { - (void) other1088; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1106) { + (void) other1106; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1089) { - (void) other1089; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1107) { + (void) other1107; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -27615,6 +28931,10 @@ void ISchema::__set_name(const std::string& val) { this->name = val; } +void ISchema::__set_catName(const std::string& val) { + this->catName = val; +} + void ISchema::__set_dbName(const std::string& val) { this->dbName = val; } @@ -27664,9 +28984,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1090; - xfer += iprot->readI32(ecast1090); - this->schemaType = (SchemaType::type)ecast1090; + int32_t ecast1108; + xfer += iprot->readI32(ecast1108); + this->schemaType = (SchemaType::type)ecast1108; this->__isset.schemaType = true; } else { xfer += iprot->skip(ftype); @@ -27682,33 +29002,41 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbName); this->__isset.dbName = true; } else { xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1091; - xfer += iprot->readI32(ecast1091); - this->compatibility = (SchemaCompatibility::type)ecast1091; + int32_t ecast1109; + xfer += iprot->readI32(ecast1109); + this->compatibility = (SchemaCompatibility::type)ecast1109; this->__isset.compatibility = true; } else { xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1092; - xfer += iprot->readI32(ecast1092); - this->validationLevel = (SchemaValidation::type)ecast1092; + int32_t ecast1110; + xfer += iprot->readI32(ecast1110); + this->validationLevel = (SchemaValidation::type)ecast1110; this->__isset.validationLevel = true; } else { xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->canEvolve); this->__isset.canEvolve = true; @@ -27716,7 +29044,7 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->schemaGroup); this->__isset.schemaGroup = true; @@ -27724,7 +29052,7 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == 
::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->description); this->__isset.description = true; @@ -27757,29 +29085,33 @@ uint32_t ISchema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("compatibility", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeFieldBegin("compatibility", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)this->compatibility); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validationLevel", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeFieldBegin("validationLevel", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32((int32_t)this->validationLevel); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("canEvolve", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("canEvolve", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->canEvolve); xfer += oprot->writeFieldEnd(); if (this->__isset.schemaGroup) { - xfer += oprot->writeFieldBegin("schemaGroup", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeFieldBegin("schemaGroup", ::apache::thrift::protocol::T_STRING, 8); xfer += oprot->writeString(this->schemaGroup); xfer += oprot->writeFieldEnd(); } if (this->__isset.description) { - xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 9); xfer += oprot->writeString(this->description); xfer += oprot->writeFieldEnd(); } @@ -27792,6 +29124,7 @@ void swap(ISchema &a, ISchema &b) { using ::std::swap; swap(a.schemaType, b.schemaType); swap(a.name, b.name); + swap(a.catName, b.catName); swap(a.dbName, b.dbName); swap(a.compatibility, b.compatibility); swap(a.validationLevel, b.validationLevel); @@ -27801,27 +29134,29 @@ void swap(ISchema &a, ISchema &b) { swap(a.__isset, b.__isset); } -ISchema::ISchema(const ISchema& other1093) { - schemaType = other1093.schemaType; - name = other1093.name; - dbName = other1093.dbName; - compatibility = other1093.compatibility; - validationLevel = other1093.validationLevel; - canEvolve = other1093.canEvolve; - schemaGroup = other1093.schemaGroup; - description = other1093.description; - __isset = other1093.__isset; +ISchema::ISchema(const ISchema& other1111) { + schemaType = other1111.schemaType; + name = other1111.name; + catName = other1111.catName; + dbName = other1111.dbName; + compatibility = other1111.compatibility; + validationLevel = other1111.validationLevel; + canEvolve = other1111.canEvolve; + schemaGroup = other1111.schemaGroup; + description = other1111.description; + __isset = other1111.__isset; } -ISchema& ISchema::operator=(const ISchema& other1094) { - schemaType = other1094.schemaType; - name = other1094.name; - dbName = other1094.dbName; - compatibility = other1094.compatibility; - validationLevel = other1094.validationLevel; - canEvolve = other1094.canEvolve; - schemaGroup = other1094.schemaGroup; - description = 
other1094.description; - __isset = other1094.__isset; +ISchema& ISchema::operator=(const ISchema& other1112) { + schemaType = other1112.schemaType; + name = other1112.name; + catName = other1112.catName; + dbName = other1112.dbName; + compatibility = other1112.compatibility; + validationLevel = other1112.validationLevel; + canEvolve = other1112.canEvolve; + schemaGroup = other1112.schemaGroup; + description = other1112.description; + __isset = other1112.__isset; return *this; } void ISchema::printTo(std::ostream& out) const { @@ -27829,6 +29164,7 @@ void ISchema::printTo(std::ostream& out) const { out << "ISchema("; out << "schemaType=" << to_string(schemaType); out << ", " << "name=" << to_string(name); + out << ", " << "catName=" << to_string(catName); out << ", " << "dbName=" << to_string(dbName); out << ", " << "compatibility=" << to_string(compatibility); out << ", " << "validationLevel=" << to_string(validationLevel); @@ -27843,6 +29179,10 @@ ISchemaName::~ISchemaName() throw() { } +void ISchemaName::__set_catName(const std::string& val) { + this->catName = val; +} + void ISchemaName::__set_dbName(const std::string& val) { this->dbName = val; } @@ -27874,13 +29214,21 @@ uint32_t ISchemaName::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbName); this->__isset.dbName = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->schemaName); this->__isset.schemaName = true; @@ -27905,11 +29253,15 @@ uint32_t ISchemaName::write(::apache::thrift::protocol::TProtocol* oprot) const apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ISchemaName"); - xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); @@ -27920,26 +29272,30 @@ uint32_t ISchemaName::write(::apache::thrift::protocol::TProtocol* oprot) const void swap(ISchemaName &a, ISchemaName &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.dbName, b.dbName); swap(a.schemaName, b.schemaName); swap(a.__isset, b.__isset); } -ISchemaName::ISchemaName(const ISchemaName& other1095) { - dbName = other1095.dbName; - schemaName = other1095.schemaName; - __isset = other1095.__isset; +ISchemaName::ISchemaName(const ISchemaName& other1113) { + catName = other1113.catName; + dbName = other1113.dbName; + schemaName = other1113.schemaName; + __isset = other1113.__isset; } -ISchemaName& ISchemaName::operator=(const ISchemaName& other1096) { - dbName = other1096.dbName; - schemaName = other1096.schemaName; - __isset = other1096.__isset; +ISchemaName& ISchemaName::operator=(const ISchemaName& other1114) { + 
catName = other1114.catName; + dbName = other1114.dbName; + schemaName = other1114.schemaName; + __isset = other1114.__isset; return *this; } void ISchemaName::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "ISchemaName("; - out << "dbName=" << to_string(dbName); + out << "catName=" << to_string(catName); + out << ", " << "dbName=" << to_string(dbName); out << ", " << "schemaName=" << to_string(schemaName); out << ")"; } @@ -28031,15 +29387,15 @@ void swap(AlterISchemaRequest &a, AlterISchemaRequest &b) { swap(a.__isset, b.__isset); } -AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1097) { - name = other1097.name; - newSchema = other1097.newSchema; - __isset = other1097.__isset; +AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1115) { + name = other1115.name; + newSchema = other1115.newSchema; + __isset = other1115.__isset; } -AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1098) { - name = other1098.name; - newSchema = other1098.newSchema; - __isset = other1098.__isset; +AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1116) { + name = other1116.name; + newSchema = other1116.newSchema; + __isset = other1116.__isset; return *this; } void AlterISchemaRequest::printTo(std::ostream& out) const { @@ -28150,14 +29506,14 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size1099; - ::apache::thrift::protocol::TType _etype1102; - xfer += iprot->readListBegin(_etype1102, _size1099); - this->cols.resize(_size1099); - uint32_t _i1103; - for (_i1103 = 0; _i1103 < _size1099; ++_i1103) + uint32_t _size1117; + ::apache::thrift::protocol::TType _etype1120; + xfer += iprot->readListBegin(_etype1120, _size1117); + this->cols.resize(_size1117); + uint32_t _i1121; + for (_i1121 = 0; _i1121 < _size1117; ++_i1121) { - xfer += this->cols[_i1103].read(iprot); + xfer += this->cols[_i1121].read(iprot); } xfer += iprot->readListEnd(); } @@ -28168,9 +29524,9 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1104; - xfer += iprot->readI32(ecast1104); - this->state = (SchemaVersionState::type)ecast1104; + int32_t ecast1122; + xfer += iprot->readI32(ecast1122); + this->state = (SchemaVersionState::type)ecast1122; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -28248,10 +29604,10 @@ uint32_t SchemaVersion::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size())); - std::vector<FieldSchema> ::const_iterator _iter1105; - for (_iter1105 = this->cols.begin(); _iter1105 != this->cols.end(); ++_iter1105) + std::vector<FieldSchema> ::const_iterator _iter1123; + for (_iter1123 = this->cols.begin(); _iter1123 != this->cols.end(); ++_iter1123) { - xfer += (*_iter1105).write(oprot); + xfer += (*_iter1123).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28307,31 +29663,31 @@ void swap(SchemaVersion &a, SchemaVersion &b) { swap(a.__isset, b.__isset); } -SchemaVersion::SchemaVersion(const SchemaVersion& other1106) { - schema = other1106.schema; - version = other1106.version; - createdAt = other1106.createdAt; - cols = other1106.cols; - state = other1106.state; - description =
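/* Review note: ISchemaName renumbers existing fields rather than appending: catName takes id 1
   and dbName/schemaName move to 2 and 3. With Thrift's id-based decoding, an old reader handed a
   new payload would bind the catalog string to dbName, so this change assumes clients and the
   metastore are regenerated and rolled out together. */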
other1106.description; - schemaText = other1106.schemaText; - fingerprint = other1106.fingerprint; - name = other1106.name; - serDe = other1106.serDe; - __isset = other1106.__isset; -} -SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1107) { - schema = other1107.schema; - version = other1107.version; - createdAt = other1107.createdAt; - cols = other1107.cols; - state = other1107.state; - description = other1107.description; - schemaText = other1107.schemaText; - fingerprint = other1107.fingerprint; - name = other1107.name; - serDe = other1107.serDe; - __isset = other1107.__isset; +SchemaVersion::SchemaVersion(const SchemaVersion& other1124) { + schema = other1124.schema; + version = other1124.version; + createdAt = other1124.createdAt; + cols = other1124.cols; + state = other1124.state; + description = other1124.description; + schemaText = other1124.schemaText; + fingerprint = other1124.fingerprint; + name = other1124.name; + serDe = other1124.serDe; + __isset = other1124.__isset; +} +SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1125) { + schema = other1125.schema; + version = other1125.version; + createdAt = other1125.createdAt; + cols = other1125.cols; + state = other1125.state; + description = other1125.description; + schemaText = other1125.schemaText; + fingerprint = other1125.fingerprint; + name = other1125.name; + serDe = other1125.serDe; + __isset = other1125.__isset; return *this; } void SchemaVersion::printTo(std::ostream& out) const { @@ -28437,15 +29793,15 @@ void swap(SchemaVersionDescriptor &a, SchemaVersionDescriptor &b) { swap(a.__isset, b.__isset); } -SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1108) { - schema = other1108.schema; - version = other1108.version; - __isset = other1108.__isset; +SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1126) { + schema = other1126.schema; + version = other1126.version; + __isset = other1126.__isset; } -SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1109) { - schema = other1109.schema; - version = other1109.version; - __isset = other1109.__isset; +SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1127) { + schema = other1127.schema; + version = other1127.version; + __isset = other1127.__isset; return *this; } void SchemaVersionDescriptor::printTo(std::ostream& out) const { @@ -28566,17 +29922,17 @@ void swap(FindSchemasByColsRqst &a, FindSchemasByColsRqst &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1110) { - colName = other1110.colName; - colNamespace = other1110.colNamespace; - type = other1110.type; - __isset = other1110.__isset; +FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1128) { + colName = other1128.colName; + colNamespace = other1128.colNamespace; + type = other1128.type; + __isset = other1128.__isset; } -FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1111) { - colName = other1111.colName; - colNamespace = other1111.colNamespace; - type = other1111.type; - __isset = other1111.__isset; +FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1129) { + colName = other1129.colName; + colNamespace = other1129.colNamespace; + type = other1129.type; + __isset = other1129.__isset; return *this; } void FindSchemasByColsRqst::printTo(std::ostream& 
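/* Review note: the SchemaVersion::cols hunks above follow the generator's standard collection
   protocol: read is readListBegin -> resize -> element-wise read() -> readListEnd, and write is
   writeListBegin(T_STRUCT, size) -> element-wise write() -> writeListEnd. A condensed sketch of
   the read side for a std::vector<FieldSchema> field (variable names simplified from the
   generated _sizeNNNN/_iNNNN temporaries):

     uint32_t size;
     ::apache::thrift::protocol::TType etype;
     xfer += iprot->readListBegin(etype, size);
     this->cols.resize(size);
     for (uint32_t i = 0; i < size; ++i) {
       xfer += this->cols[i].read(iprot);  // each FieldSchema decodes itself
     }
     xfer += iprot->readListEnd();
*/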
out) const { @@ -28622,14 +29978,14 @@ uint32_t FindSchemasByColsResp::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->schemaVersions.clear(); - uint32_t _size1112; - ::apache::thrift::protocol::TType _etype1115; - xfer += iprot->readListBegin(_etype1115, _size1112); - this->schemaVersions.resize(_size1112); - uint32_t _i1116; - for (_i1116 = 0; _i1116 < _size1112; ++_i1116) + uint32_t _size1130; + ::apache::thrift::protocol::TType _etype1133; + xfer += iprot->readListBegin(_etype1133, _size1130); + this->schemaVersions.resize(_size1130); + uint32_t _i1134; + for (_i1134 = 0; _i1134 < _size1130; ++_i1134) { - xfer += this->schemaVersions[_i1116].read(iprot); + xfer += this->schemaVersions[_i1134].read(iprot); } xfer += iprot->readListEnd(); } @@ -28658,10 +30014,10 @@ uint32_t FindSchemasByColsResp::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("schemaVersions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->schemaVersions.size())); - std::vector<SchemaVersion> ::const_iterator _iter1117; - for (_iter1117 = this->schemaVersions.begin(); _iter1117 != this->schemaVersions.end(); ++_iter1117) + std::vector<SchemaVersion> ::const_iterator _iter1135; + for (_iter1135 = this->schemaVersions.begin(); _iter1135 != this->schemaVersions.end(); ++_iter1135) { - xfer += (*_iter1117).write(oprot); + xfer += (*_iter1135).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28678,13 +30034,13 @@ void swap(FindSchemasByColsResp &a, FindSchemasByColsResp &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1118) { - schemaVersions = other1118.schemaVersions; - __isset = other1118.__isset; +FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1136) { + schemaVersions = other1136.schemaVersions; + __isset = other1136.__isset; } -FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1119) { - schemaVersions = other1119.schemaVersions; - __isset = other1119.__isset; +FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1137) { + schemaVersions = other1137.schemaVersions; + __isset = other1137.__isset; return *this; } void FindSchemasByColsResp::printTo(std::ostream& out) const { @@ -28781,15 +30137,15 @@ void swap(MapSchemaVersionToSerdeRequest &a, MapSchemaVersionToSerdeRequest &b) swap(a.__isset, b.__isset); } -MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1120) { - schemaVersion = other1120.schemaVersion; - serdeName = other1120.serdeName; - __isset = other1120.__isset; +MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1138) { + schemaVersion = other1138.schemaVersion; + serdeName = other1138.serdeName; + __isset = other1138.__isset; } -MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1121) { - schemaVersion = other1121.schemaVersion; - serdeName = other1121.serdeName; - __isset = other1121.__isset; +MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1139) { + schemaVersion = other1139.schemaVersion; + serdeName = other1139.serdeName; + __isset = other1139.__isset; return *this; } void MapSchemaVersionToSerdeRequest::printTo(std::ostream& out) const { @@
-28844,9 +30200,9 @@ uint32_t SetSchemaVersionStateRequest::read(::apache::thrift::protocol::TProtoco break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1122; - xfer += iprot->readI32(ecast1122); - this->state = (SchemaVersionState::type)ecast1122; + int32_t ecast1140; + xfer += iprot->readI32(ecast1140); + this->state = (SchemaVersionState::type)ecast1140; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -28889,15 +30245,15 @@ void swap(SetSchemaVersionStateRequest &a, SetSchemaVersionStateRequest &b) { swap(a.__isset, b.__isset); } -SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1123) { - schemaVersion = other1123.schemaVersion; - state = other1123.state; - __isset = other1123.__isset; +SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1141) { + schemaVersion = other1141.schemaVersion; + state = other1141.state; + __isset = other1141.__isset; } -SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1124) { - schemaVersion = other1124.schemaVersion; - state = other1124.state; - __isset = other1124.__isset; +SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1142) { + schemaVersion = other1142.schemaVersion; + state = other1142.state; + __isset = other1142.__isset; return *this; } void SetSchemaVersionStateRequest::printTo(std::ostream& out) const { @@ -28978,13 +30334,13 @@ void swap(GetSerdeRequest &a, GetSerdeRequest &b) { swap(a.__isset, b.__isset); } -GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1125) { - serdeName = other1125.serdeName; - __isset = other1125.__isset; +GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1143) { + serdeName = other1143.serdeName; + __isset = other1143.__isset; } -GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1126) { - serdeName = other1126.serdeName; - __isset = other1126.__isset; +GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1144) { + serdeName = other1144.serdeName; + __isset = other1144.__isset; return *this; } void GetSerdeRequest::printTo(std::ostream& out) const { @@ -29064,13 +30420,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1127) : TException() { - message = other1127.message; - __isset = other1127.__isset; +MetaException::MetaException(const MetaException& other1145) : TException() { + message = other1145.message; + __isset = other1145.__isset; } -MetaException& MetaException::operator=(const MetaException& other1128) { - message = other1128.message; - __isset = other1128.__isset; +MetaException& MetaException::operator=(const MetaException& other1146) { + message = other1146.message; + __isset = other1146.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -29161,13 +30517,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1129) : TException() { - message = other1129.message; - __isset = other1129.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1147) : TException() { + message = other1147.message; + __isset = other1147.__isset; } -UnknownTableException& UnknownTableException::operator=(const 
UnknownTableException& other1130) { - message = other1130.message; - __isset = other1130.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1148) { + message = other1148.message; + __isset = other1148.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -29258,13 +30614,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1131) : TException() { - message = other1131.message; - __isset = other1131.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1149) : TException() { + message = other1149.message; + __isset = other1149.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1132) { - message = other1132.message; - __isset = other1132.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1150) { + message = other1150.message; + __isset = other1150.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -29355,13 +30711,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1133) : TException() { - message = other1133.message; - __isset = other1133.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1151) : TException() { + message = other1151.message; + __isset = other1151.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1134) { - message = other1134.message; - __isset = other1134.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1152) { + message = other1152.message; + __isset = other1152.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -29452,13 +30808,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1135) : TException() { - message = other1135.message; - __isset = other1135.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1153) : TException() { + message = other1153.message; + __isset = other1153.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1136) { - message = other1136.message; - __isset = other1136.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1154) { + message = other1154.message; + __isset = other1154.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -29549,13 +30905,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1137) : TException() { - message = other1137.message; - __isset = other1137.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1155) : TException() { + message = other1155.message; + __isset = other1155.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1138) { - message = other1138.message; - 
__isset = other1138.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1156) { + message = other1156.message; + __isset = other1156.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -29646,13 +31002,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1139) : TException() { - message = other1139.message; - __isset = other1139.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1157) : TException() { + message = other1157.message; + __isset = other1157.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1140) { - message = other1140.message; - __isset = other1140.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1158) { + message = other1158.message; + __isset = other1158.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -29743,13 +31099,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1141) : TException() { - message = other1141.message; - __isset = other1141.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1159) : TException() { + message = other1159.message; + __isset = other1159.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1142) { - message = other1142.message; - __isset = other1142.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1160) { + message = other1160.message; + __isset = other1160.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -29840,13 +31196,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1143) : TException() { - message = other1143.message; - __isset = other1143.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1161) : TException() { + message = other1161.message; + __isset = other1161.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1144) { - message = other1144.message; - __isset = other1144.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1162) { + message = other1162.message; + __isset = other1162.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -29937,13 +31293,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1145) : TException() { - message = other1145.message; - __isset = other1145.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1163) : TException() { + message = other1163.message; + __isset = other1163.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1146) { - message = 
other1146.message; - __isset = other1146.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1164) { + message = other1164.message; + __isset = other1164.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -30034,13 +31390,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1147) : TException() { - message = other1147.message; - __isset = other1147.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1165) : TException() { + message = other1165.message; + __isset = other1165.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1148) { - message = other1148.message; - __isset = other1148.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1166) { + message = other1166.message; + __isset = other1166.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -30131,13 +31487,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1149) : TException() { - message = other1149.message; - __isset = other1149.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1167) : TException() { + message = other1167.message; + __isset = other1167.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1150) { - message = other1150.message; - __isset = other1150.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1168) { + message = other1168.message; + __isset = other1168.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -30228,13 +31584,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1151) : TException() { - message = other1151.message; - __isset = other1151.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1169) : TException() { + message = other1169.message; + __isset = other1169.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1152) { - message = other1152.message; - __isset = other1152.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1170) { + message = other1170.message; + __isset = other1170.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -30325,13 +31681,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1153) : TException() { - message = other1153.message; - __isset = other1153.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1171) : TException() { + message = other1171.message; + __isset = other1171.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1154) { - message = other1154.message; - __isset = other1154.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1172) { + message = other1172.message; + __isset = other1172.__isset; return *this; } void 
TxnOpenException::printTo(std::ostream& out) const { @@ -30422,13 +31778,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1155) : TException() { - message = other1155.message; - __isset = other1155.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1173) : TException() { + message = other1173.message; + __isset = other1173.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1156) { - message = other1156.message; - __isset = other1156.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1174) { + message = other1174.message; + __isset = other1174.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index b094831ed7..746755f73e 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -287,6 +287,18 @@ class GrantRevokeRoleRequest; class GrantRevokeRoleResponse; +class Catalog; + +class CreateCatalogRequest; + +class GetCatalogRequest; + +class GetCatalogResponse; + +class GetCatalogsResponse; + +class DropCatalogRequest; + class Database; class SerDeInfo; @@ -788,7 +800,7 @@ inline std::ostream& operator<<(std::ostream& out, const FieldSchema& obj) } typedef struct _SQLPrimaryKey__isset { - _SQLPrimaryKey__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLPrimaryKey__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {} bool table_db :1; bool table_name :1; bool column_name :1; @@ -797,6 +809,7 @@ typedef struct _SQLPrimaryKey__isset { bool enable_cstr :1; bool validate_cstr :1; bool rely_cstr :1; + bool catName :1; } _SQLPrimaryKey__isset; class SQLPrimaryKey { @@ -804,7 +817,7 @@ class SQLPrimaryKey { SQLPrimaryKey(const SQLPrimaryKey&); SQLPrimaryKey& operator=(const SQLPrimaryKey&); - SQLPrimaryKey() : table_db(), table_name(), column_name(), key_seq(0), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLPrimaryKey() : table_db(), table_name(), column_name(), key_seq(0), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() { } virtual ~SQLPrimaryKey() throw(); @@ -816,6 +829,7 @@ class SQLPrimaryKey { bool enable_cstr; bool validate_cstr; bool rely_cstr; + std::string catName; _SQLPrimaryKey__isset __isset; @@ -835,6 +849,8 @@ class SQLPrimaryKey { void __set_rely_cstr(const bool val); + void __set_catName(const std::string& val); + bool operator == (const SQLPrimaryKey & rhs) const { if (!(table_db == rhs.table_db)) @@ -853,6 +869,10 @@ class SQLPrimaryKey { return false; if (!(rely_cstr == rhs.rely_cstr)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const SQLPrimaryKey &rhs) const { @@ -876,7 +896,7 @@ inline std::ostream& operator<<(std::ostream& out, const SQLPrimaryKey& obj) } typedef struct _SQLForeignKey__isset { - _SQLForeignKey__isset() : pktable_db(false), pktable_name(false), 
pkcolumn_name(false), fktable_db(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLForeignKey__isset() : pktable_db(false), pktable_name(false), pkcolumn_name(false), fktable_db(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {} bool pktable_db :1; bool pktable_name :1; bool pkcolumn_name :1; @@ -891,6 +911,7 @@ typedef struct _SQLForeignKey__isset { bool enable_cstr :1; bool validate_cstr :1; bool rely_cstr :1; + bool catName :1; } _SQLForeignKey__isset; class SQLForeignKey { @@ -898,7 +919,7 @@ class SQLForeignKey { SQLForeignKey(const SQLForeignKey&); SQLForeignKey& operator=(const SQLForeignKey&); - SQLForeignKey() : pktable_db(), pktable_name(), pkcolumn_name(), fktable_db(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLForeignKey() : pktable_db(), pktable_name(), pkcolumn_name(), fktable_db(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() { } virtual ~SQLForeignKey() throw(); @@ -916,6 +937,7 @@ class SQLForeignKey { bool enable_cstr; bool validate_cstr; bool rely_cstr; + std::string catName; _SQLForeignKey__isset __isset; @@ -947,6 +969,8 @@ class SQLForeignKey { void __set_rely_cstr(const bool val); + void __set_catName(const std::string& val); + bool operator == (const SQLForeignKey & rhs) const { if (!(pktable_db == rhs.pktable_db)) @@ -977,6 +1001,10 @@ class SQLForeignKey { return false; if (!(rely_cstr == rhs.rely_cstr)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const SQLForeignKey &rhs) const { @@ -1000,7 +1028,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLForeignKey& obj) } typedef struct _SQLUniqueConstraint__isset { - _SQLUniqueConstraint__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), uk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLUniqueConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), key_seq(false), uk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1016,10 +1045,11 @@ class SQLUniqueConstraint { SQLUniqueConstraint(const SQLUniqueConstraint&); SQLUniqueConstraint& operator=(const SQLUniqueConstraint&); - SQLUniqueConstraint() : table_db(), table_name(), column_name(), key_seq(0), uk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLUniqueConstraint() : catName(), table_db(), table_name(), column_name(), key_seq(0), uk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLUniqueConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1031,6 +1061,8 @@ class SQLUniqueConstraint { _SQLUniqueConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1049,6 +1081,8 
@@ class SQLUniqueConstraint { bool operator == (const SQLUniqueConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1088,7 +1122,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLUniqueConstraint& ob } typedef struct _SQLNotNullConstraint__isset { - _SQLNotNullConstraint__isset() : table_db(false), table_name(false), column_name(false), nn_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLNotNullConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), nn_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1103,10 +1138,11 @@ class SQLNotNullConstraint { SQLNotNullConstraint(const SQLNotNullConstraint&); SQLNotNullConstraint& operator=(const SQLNotNullConstraint&); - SQLNotNullConstraint() : table_db(), table_name(), column_name(), nn_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLNotNullConstraint() : catName(), table_db(), table_name(), column_name(), nn_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLNotNullConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1117,6 +1153,8 @@ class SQLNotNullConstraint { _SQLNotNullConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1133,6 +1171,8 @@ class SQLNotNullConstraint { bool operator == (const SQLNotNullConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1170,7 +1210,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLNotNullConstraint& o } typedef struct _SQLDefaultConstraint__isset { - _SQLDefaultConstraint__isset() : table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLDefaultConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1186,10 +1227,11 @@ class SQLDefaultConstraint { SQLDefaultConstraint(const SQLDefaultConstraint&); SQLDefaultConstraint& operator=(const SQLDefaultConstraint&); - SQLDefaultConstraint() : table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLDefaultConstraint() : catName(), table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLDefaultConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1201,6 +1243,8 @@ class SQLDefaultConstraint { _SQLDefaultConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1219,6 +1263,8 @@ class SQLDefaultConstraint { bool operator == (const SQLDefaultConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) 
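/* Review note: two conventions for catName coexist in these constraint structs. SQLPrimaryKey and
   SQLForeignKey append it as an optional trailing field, so operator== consults __isset.catName
   before comparing; SQLUniqueConstraint, SQLNotNullConstraint, SQLDefaultConstraint and
   SQLCheckConstraint prepend it as field 1 with no __isset guard, so it always participates in
   equality. A short sketch of the optional-field semantics (hypothetical values, using <cassert>):

     SQLPrimaryKey a, b;
     assert(a == b);              // catName unset on both sides: still equal
     b.__set_catName("hive");
     assert(!(a == b));           // set on one side only: __isset differs
*/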
@@ -1258,7 +1304,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLDefaultConstraint& o } typedef struct _SQLCheckConstraint__isset { - _SQLCheckConstraint__isset() : table_db(false), table_name(false), column_name(false), check_expression(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLCheckConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), check_expression(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1274,10 +1321,11 @@ class SQLCheckConstraint { SQLCheckConstraint(const SQLCheckConstraint&); SQLCheckConstraint& operator=(const SQLCheckConstraint&); - SQLCheckConstraint() : table_db(), table_name(), column_name(), check_expression(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLCheckConstraint() : catName(), table_db(), table_name(), column_name(), check_expression(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLCheckConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1289,6 +1337,8 @@ class SQLCheckConstraint { _SQLCheckConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1307,6 +1357,8 @@ class SQLCheckConstraint { bool operator == (const SQLCheckConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1416,12 +1468,13 @@ inline std::ostream& operator<<(std::ostream& out, const Type& obj) } typedef struct _HiveObjectRef__isset { - _HiveObjectRef__isset() : objectType(false), dbName(false), objectName(false), partValues(false), columnName(false) {} + _HiveObjectRef__isset() : objectType(false), dbName(false), objectName(false), partValues(false), columnName(false), catName(false) {} bool objectType :1; bool dbName :1; bool objectName :1; bool partValues :1; bool columnName :1; + bool catName :1; } _HiveObjectRef__isset; class HiveObjectRef { @@ -1429,7 +1482,7 @@ class HiveObjectRef { HiveObjectRef(const HiveObjectRef&); HiveObjectRef& operator=(const HiveObjectRef&); - HiveObjectRef() : objectType((HiveObjectType::type)0), dbName(), objectName(), columnName() { + HiveObjectRef() : objectType((HiveObjectType::type)0), dbName(), objectName(), columnName(), catName() { } virtual ~HiveObjectRef() throw(); @@ -1438,6 +1491,7 @@ class HiveObjectRef { std::string objectName; std::vector<std::string> partValues; std::string columnName; + std::string catName; _HiveObjectRef__isset __isset; @@ -1451,6 +1505,8 @@ class HiveObjectRef { void __set_columnName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const HiveObjectRef & rhs) const { if (!(objectType == rhs.objectType)) @@ -1463,6 +1519,10 @@ class HiveObjectRef { return false; if (!(columnName == rhs.columnName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const HiveObjectRef &rhs) const { @@ -2272,8 +2332,298 @@ inline std::ostream& operator<<(std::ostream& out, const GrantRevokeRoleResponse return out; } +typedef struct _Catalog__isset { + _Catalog__isset() : name(false), description(false), locationUri(false) {} + bool
name :1; + bool description :1; + bool locationUri :1; +} _Catalog__isset; + +class Catalog { + public: + + Catalog(const Catalog&); + Catalog& operator=(const Catalog&); + Catalog() : name(), description(), locationUri() { + } + + virtual ~Catalog() throw(); + std::string name; + std::string description; + std::string locationUri; + + _Catalog__isset __isset; + + void __set_name(const std::string& val); + + void __set_description(const std::string& val); + + void __set_locationUri(const std::string& val); + + bool operator == (const Catalog & rhs) const + { + if (!(name == rhs.name)) + return false; + if (__isset.description != rhs.__isset.description) + return false; + else if (__isset.description && !(description == rhs.description)) + return false; + if (!(locationUri == rhs.locationUri)) + return false; + return true; + } + bool operator != (const Catalog &rhs) const { + return !(*this == rhs); + } + + bool operator < (const Catalog & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(Catalog &a, Catalog &b); + +inline std::ostream& operator<<(std::ostream& out, const Catalog& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _CreateCatalogRequest__isset { + _CreateCatalogRequest__isset() : catalog(false) {} + bool catalog :1; +} _CreateCatalogRequest__isset; + +class CreateCatalogRequest { + public: + + CreateCatalogRequest(const CreateCatalogRequest&); + CreateCatalogRequest& operator=(const CreateCatalogRequest&); + CreateCatalogRequest() { + } + + virtual ~CreateCatalogRequest() throw(); + Catalog catalog; + + _CreateCatalogRequest__isset __isset; + + void __set_catalog(const Catalog& val); + + bool operator == (const CreateCatalogRequest & rhs) const + { + if (!(catalog == rhs.catalog)) + return false; + return true; + } + bool operator != (const CreateCatalogRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const CreateCatalogRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(CreateCatalogRequest &a, CreateCatalogRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const CreateCatalogRequest& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _GetCatalogRequest__isset { + _GetCatalogRequest__isset() : name(false) {} + bool name :1; +} _GetCatalogRequest__isset; + +class GetCatalogRequest { + public: + + GetCatalogRequest(const GetCatalogRequest&); + GetCatalogRequest& operator=(const GetCatalogRequest&); + GetCatalogRequest() : name() { + } + + virtual ~GetCatalogRequest() throw(); + std::string name; + + _GetCatalogRequest__isset __isset; + + void __set_name(const std::string& val); + + bool operator == (const GetCatalogRequest & rhs) const + { + if (!(name == rhs.name)) + return false; + return true; + } + bool operator != (const GetCatalogRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetCatalogRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetCatalogRequest &a, GetCatalogRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetCatalogRequest& obj) +{ + 
obj.printTo(out); + return out; +} + +typedef struct _GetCatalogResponse__isset { + _GetCatalogResponse__isset() : catalog(false) {} + bool catalog :1; +} _GetCatalogResponse__isset; + +class GetCatalogResponse { + public: + + GetCatalogResponse(const GetCatalogResponse&); + GetCatalogResponse& operator=(const GetCatalogResponse&); + GetCatalogResponse() { + } + + virtual ~GetCatalogResponse() throw(); + Catalog catalog; + + _GetCatalogResponse__isset __isset; + + void __set_catalog(const Catalog& val); + + bool operator == (const GetCatalogResponse & rhs) const + { + if (!(catalog == rhs.catalog)) + return false; + return true; + } + bool operator != (const GetCatalogResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetCatalogResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetCatalogResponse &a, GetCatalogResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const GetCatalogResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _GetCatalogsResponse__isset { + _GetCatalogsResponse__isset() : names(false) {} + bool names :1; +} _GetCatalogsResponse__isset; + +class GetCatalogsResponse { + public: + + GetCatalogsResponse(const GetCatalogsResponse&); + GetCatalogsResponse& operator=(const GetCatalogsResponse&); + GetCatalogsResponse() { + } + + virtual ~GetCatalogsResponse() throw(); + std::vector<std::string> names; + + _GetCatalogsResponse__isset __isset; + + void __set_names(const std::vector<std::string> & val); + + bool operator == (const GetCatalogsResponse & rhs) const + { + if (!(names == rhs.names)) + return false; + return true; + } + bool operator != (const GetCatalogsResponse &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetCatalogsResponse & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetCatalogsResponse &a, GetCatalogsResponse &b); + +inline std::ostream& operator<<(std::ostream& out, const GetCatalogsResponse& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _DropCatalogRequest__isset { + _DropCatalogRequest__isset() : name(false) {} + bool name :1; +} _DropCatalogRequest__isset; + +class DropCatalogRequest { + public: + + DropCatalogRequest(const DropCatalogRequest&); + DropCatalogRequest& operator=(const DropCatalogRequest&); + DropCatalogRequest() : name() { + } + + virtual ~DropCatalogRequest() throw(); + std::string name; + + _DropCatalogRequest__isset __isset; + + void __set_name(const std::string& val); + + bool operator == (const DropCatalogRequest & rhs) const + { + if (!(name == rhs.name)) + return false; + return true; + } + bool operator != (const DropCatalogRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const DropCatalogRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(DropCatalogRequest &a, DropCatalogRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const DropCatalogRequest& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _Database__isset { - _Database__isset() : name(false), description(false), locationUri(false),
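/* Review note: Catalog plus CreateCatalogRequest, GetCatalogRequest/GetCatalogResponse,
   GetCatalogsResponse and DropCatalogRequest above form the complete request/response surface for
   the new catalog concept. A minimal sketch of assembling a creation request with the generated
   setters (hypothetical name and URI; the corresponding service call lives elsewhere in the
   generated client):

     Catalog cat;
     cat.__set_name("prod");
     cat.__set_description("production warehouse catalog");
     cat.__set_locationUri("hdfs://nn:8020/warehouse/prod");

     CreateCatalogRequest req;
     req.__set_catalog(cat);
*/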
parameters(false), privileges(false), ownerName(false), ownerType(false) {} + _Database__isset() : name(false), description(false), locationUri(false), parameters(false), privileges(false), ownerName(false), ownerType(false), catalogName(false) {} bool name :1; bool description :1; bool locationUri :1; @@ -2281,6 +2631,7 @@ typedef struct _Database__isset { bool privileges :1; bool ownerName :1; bool ownerType :1; + bool catalogName :1; } _Database__isset; class Database { @@ -2288,7 +2639,7 @@ class Database { Database(const Database&); Database& operator=(const Database&); - Database() : name(), description(), locationUri(), ownerName(), ownerType((PrincipalType::type)0) { + Database() : name(), description(), locationUri(), ownerName(), ownerType((PrincipalType::type)0), catalogName() { } virtual ~Database() throw(); @@ -2299,6 +2650,7 @@ class Database { PrincipalPrivilegeSet privileges; std::string ownerName; PrincipalType::type ownerType; + std::string catalogName; _Database__isset __isset; @@ -2316,6 +2668,8 @@ class Database { void __set_ownerType(const PrincipalType::type val); + void __set_catalogName(const std::string& val); + bool operator == (const Database & rhs) const { if (!(name == rhs.name)) @@ -2338,6 +2692,10 @@ class Database { return false; else if (__isset.ownerType && !(ownerType == rhs.ownerType)) return false; + if (__isset.catalogName != rhs.__isset.catalogName) + return false; + else if (__isset.catalogName && !(catalogName == rhs.catalogName)) + return false; return true; } bool operator != (const Database &rhs) const { @@ -2677,7 +3035,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2694,6 +3052,7 @@ typedef struct _Table__isset { bool temporary :1; bool rewriteEnabled :1; bool creationMetadata :1; + bool catName :1; } _Table__isset; class Table { @@ -2701,7 +3060,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0), catName() { } virtual ~Table() throw(); @@ -2721,6 +3080,7 @@ class Table { bool temporary; bool rewriteEnabled; CreationMetadata creationMetadata; + std::string catName; _Table__isset __isset; @@ -2756,6 +3116,8 @@ class Table { void __set_creationMetadata(const CreationMetadata& val); + void __set_catName(const std::string& val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -2798,6 +3160,10 @@ class Table { return false; else if 
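/* Review note: Database spells its new member catalogName (__set_catalogName,
   __isset.catalogName) while Table, Partition and the other structs in this patch use catName,
   so callers have to track both spellings:

     Database db;
     db.__set_catalogName("hive");  // Database spelling
     Table t;
     t.__set_catName("hive");       // spelling everywhere else
*/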
(__isset.creationMetadata && !(creationMetadata == rhs.creationMetadata)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Table &rhs) const { @@ -2821,7 +3187,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj) } typedef struct _Partition__isset { - _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false) {} + _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {} bool values :1; bool dbName :1; bool tableName :1; @@ -2830,6 +3196,7 @@ typedef struct _Partition__isset { bool sd :1; bool parameters :1; bool privileges :1; + bool catName :1; } _Partition__isset; class Partition { @@ -2837,7 +3204,7 @@ class Partition { Partition(const Partition&); Partition& operator=(const Partition&); - Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0) { + Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() { } virtual ~Partition() throw(); @@ -2849,6 +3216,7 @@ class Partition { StorageDescriptor sd; std::map<std::string, std::string> parameters; PrincipalPrivilegeSet privileges; + std::string catName; _Partition__isset __isset; @@ -2868,6 +3236,8 @@ class Partition { void __set_privileges(const PrincipalPrivilegeSet& val); + void __set_catName(const std::string& val); + bool operator == (const Partition & rhs) const { if (!(values == rhs.values)) @@ -2888,6 +3258,10 @@ class Partition { return false; else if (__isset.privileges && !(privileges == rhs.privileges)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Partition &rhs) const { @@ -3087,12 +3461,13 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS } typedef struct _PartitionSpec__isset { - _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false) {} + _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {} bool dbName :1; bool tableName :1; bool rootPath :1; bool sharedSDPartitionSpec :1; bool partitionList :1; + bool catName :1; } _PartitionSpec__isset; class PartitionSpec { @@ -3100,7 +3475,7 @@ class PartitionSpec { PartitionSpec(const PartitionSpec&); PartitionSpec& operator=(const PartitionSpec&); - PartitionSpec() : dbName(), tableName(), rootPath() { + PartitionSpec() : dbName(), tableName(), rootPath(), catName() { } virtual ~PartitionSpec() throw(); @@ -3109,6 +3484,7 @@ class PartitionSpec { std::string rootPath; PartitionSpecWithSharedSD sharedSDPartitionSpec; PartitionListComposingSpec partitionList; + std::string catName; _PartitionSpec__isset __isset; @@ -3122,6 +3498,8 @@ class PartitionSpec { void __set_partitionList(const PartitionListComposingSpec& val); + void __set_catName(const std::string& val); + bool operator == (const PartitionSpec & rhs) const { if (!(dbName == rhs.dbName)) @@ -3138,6 +3516,10 @@ class PartitionSpec { return false; else if (__isset.partitionList && !(partitionList == rhs.partitionList)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if
(__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionSpec &rhs) const { @@ -3868,9 +4250,10 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsObj& ob } typedef struct _ColumnStatisticsDesc__isset { - _ColumnStatisticsDesc__isset() : partName(false), lastAnalyzed(false) {} + _ColumnStatisticsDesc__isset() : partName(false), lastAnalyzed(false), catName(false) {} bool partName :1; bool lastAnalyzed :1; + bool catName :1; } _ColumnStatisticsDesc__isset; class ColumnStatisticsDesc { @@ -3878,7 +4261,7 @@ class ColumnStatisticsDesc { ColumnStatisticsDesc(const ColumnStatisticsDesc&); ColumnStatisticsDesc& operator=(const ColumnStatisticsDesc&); - ColumnStatisticsDesc() : isTblLevel(0), dbName(), tableName(), partName(), lastAnalyzed(0) { + ColumnStatisticsDesc() : isTblLevel(0), dbName(), tableName(), partName(), lastAnalyzed(0), catName() { } virtual ~ColumnStatisticsDesc() throw(); @@ -3887,6 +4270,7 @@ class ColumnStatisticsDesc { std::string tableName; std::string partName; int64_t lastAnalyzed; + std::string catName; _ColumnStatisticsDesc__isset __isset; @@ -3900,6 +4284,8 @@ class ColumnStatisticsDesc { void __set_lastAnalyzed(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const ColumnStatisticsDesc & rhs) const { if (!(isTblLevel == rhs.isTblLevel)) @@ -3916,6 +4302,10 @@ class ColumnStatisticsDesc { return false; else if (__isset.lastAnalyzed && !(lastAnalyzed == rhs.lastAnalyzed)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ColumnStatisticsDesc &rhs) const { @@ -4179,29 +4569,42 @@ inline std::ostream& operator<<(std::ostream& out, const EnvironmentContext& obj return out; } +typedef struct _PrimaryKeysRequest__isset { + _PrimaryKeysRequest__isset() : catName(false) {} + bool catName :1; +} _PrimaryKeysRequest__isset; class PrimaryKeysRequest { public: PrimaryKeysRequest(const PrimaryKeysRequest&); PrimaryKeysRequest& operator=(const PrimaryKeysRequest&); - PrimaryKeysRequest() : db_name(), tbl_name() { + PrimaryKeysRequest() : db_name(), tbl_name(), catName() { } virtual ~PrimaryKeysRequest() throw(); std::string db_name; std::string tbl_name; + std::string catName; + + _PrimaryKeysRequest__isset __isset; void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const PrimaryKeysRequest & rhs) const { if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PrimaryKeysRequest &rhs) const { @@ -4265,11 +4668,12 @@ inline std::ostream& operator<<(std::ostream& out, const PrimaryKeysResponse& ob } typedef struct _ForeignKeysRequest__isset { - _ForeignKeysRequest__isset() : parent_db_name(false), parent_tbl_name(false), foreign_db_name(false), foreign_tbl_name(false) {} + _ForeignKeysRequest__isset() : parent_db_name(false), parent_tbl_name(false), foreign_db_name(false), foreign_tbl_name(false), catName(false) {} bool parent_db_name :1; bool parent_tbl_name :1; bool foreign_db_name :1; bool foreign_tbl_name :1; + bool catName :1; } _ForeignKeysRequest__isset; class ForeignKeysRequest { @@ -4277,7 +4681,7 @@ class 
ForeignKeysRequest { ForeignKeysRequest(const ForeignKeysRequest&); ForeignKeysRequest& operator=(const ForeignKeysRequest&); - ForeignKeysRequest() : parent_db_name(), parent_tbl_name(), foreign_db_name(), foreign_tbl_name() { + ForeignKeysRequest() : parent_db_name(), parent_tbl_name(), foreign_db_name(), foreign_tbl_name(), catName() { } virtual ~ForeignKeysRequest() throw(); @@ -4285,6 +4689,7 @@ class ForeignKeysRequest { std::string parent_tbl_name; std::string foreign_db_name; std::string foreign_tbl_name; + std::string catName; _ForeignKeysRequest__isset __isset; @@ -4296,6 +4701,8 @@ class ForeignKeysRequest { void __set_foreign_tbl_name(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const ForeignKeysRequest & rhs) const { if (!(parent_db_name == rhs.parent_db_name)) @@ -4306,6 +4713,10 @@ class ForeignKeysRequest { return false; if (!(foreign_tbl_name == rhs.foreign_tbl_name)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ForeignKeysRequest &rhs) const { @@ -4374,19 +4785,24 @@ class UniqueConstraintsRequest { UniqueConstraintsRequest(const UniqueConstraintsRequest&); UniqueConstraintsRequest& operator=(const UniqueConstraintsRequest&); - UniqueConstraintsRequest() : db_name(), tbl_name() { + UniqueConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~UniqueConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const UniqueConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4459,19 +4875,24 @@ class NotNullConstraintsRequest { NotNullConstraintsRequest(const NotNullConstraintsRequest&); NotNullConstraintsRequest& operator=(const NotNullConstraintsRequest&); - NotNullConstraintsRequest() : db_name(), tbl_name() { + NotNullConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~NotNullConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const NotNullConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4544,19 +4965,24 @@ class DefaultConstraintsRequest { DefaultConstraintsRequest(const DefaultConstraintsRequest&); DefaultConstraintsRequest& operator=(const DefaultConstraintsRequest&); - DefaultConstraintsRequest() : db_name(), tbl_name() { + DefaultConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~DefaultConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const DefaultConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4629,19 +5055,24 @@ class CheckConstraintsRequest { CheckConstraintsRequest(const CheckConstraintsRequest&); 
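Note the asymmetry in this stretch: UniqueConstraintsRequest, NotNullConstraintsRequest and DefaultConstraintsRequest above (and CheckConstraintsRequest continuing below) receive catName as a required-style field, first in the constructor's initializer list and compared unconditionally in operator==, whereas most other structs in the patch guard it with an __isset bit. For these requests an empty string is a real value, not "unset". A hypothetical Java usage, assuming the constructor follows the same field order as the C++ initializer shown here:

    // Three-argument constructor order (catName, db_name, tbl_name) is
    // inferred from the C++ hunks above, not stated in this diff.
    UniqueConstraintsRequest x = new UniqueConstraintsRequest("hive", "db1", "t1");
    UniqueConstraintsRequest y = new UniqueConstraintsRequest("",     "db1", "t1");
    assert !x.equals(y);              // "" is compared as a value, so x != y
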
CheckConstraintsRequest& operator=(const CheckConstraintsRequest&); - CheckConstraintsRequest() : db_name(), tbl_name() { + CheckConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~CheckConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const CheckConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4708,19 +5139,26 @@ inline std::ostream& operator<<(std::ostream& out, const CheckConstraintsRespons return out; } +typedef struct _DropConstraintRequest__isset { + _DropConstraintRequest__isset() : catName(false) {} + bool catName :1; +} _DropConstraintRequest__isset; class DropConstraintRequest { public: DropConstraintRequest(const DropConstraintRequest&); DropConstraintRequest& operator=(const DropConstraintRequest&); - DropConstraintRequest() : dbname(), tablename(), constraintname() { + DropConstraintRequest() : dbname(), tablename(), constraintname(), catName() { } virtual ~DropConstraintRequest() throw(); std::string dbname; std::string tablename; std::string constraintname; + std::string catName; + + _DropConstraintRequest__isset __isset; void __set_dbname(const std::string& val); @@ -4728,6 +5166,8 @@ class DropConstraintRequest { void __set_constraintname(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const DropConstraintRequest & rhs) const { if (!(dbname == rhs.dbname)) @@ -4736,6 +5176,10 @@ class DropConstraintRequest { return false; if (!(constraintname == rhs.constraintname)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const DropConstraintRequest &rhs) const { @@ -5044,9 +5488,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprResult& } typedef struct _PartitionsByExprRequest__isset { - _PartitionsByExprRequest__isset() : defaultPartitionName(false), maxParts(true) {} + _PartitionsByExprRequest__isset() : defaultPartitionName(false), maxParts(true), catName(false) {} bool defaultPartitionName :1; bool maxParts :1; + bool catName :1; } _PartitionsByExprRequest__isset; class PartitionsByExprRequest { @@ -5054,7 +5499,7 @@ class PartitionsByExprRequest { PartitionsByExprRequest(const PartitionsByExprRequest&); PartitionsByExprRequest& operator=(const PartitionsByExprRequest&); - PartitionsByExprRequest() : dbName(), tblName(), expr(), defaultPartitionName(), maxParts(-1) { + PartitionsByExprRequest() : dbName(), tblName(), expr(), defaultPartitionName(), maxParts(-1), catName() { } virtual ~PartitionsByExprRequest() throw(); @@ -5063,6 +5508,7 @@ class PartitionsByExprRequest { std::string expr; std::string defaultPartitionName; int16_t maxParts; + std::string catName; _PartitionsByExprRequest__isset __isset; @@ -5076,6 +5522,8 @@ class PartitionsByExprRequest { void __set_maxParts(const int16_t val); + void __set_catName(const std::string& val); + bool operator == (const PartitionsByExprRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5092,6 +5540,10 @@ class PartitionsByExprRequest { return false; else if (__isset.maxParts && !(maxParts == rhs.maxParts)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if 
(__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionsByExprRequest &rhs) const { @@ -5194,19 +5646,26 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult& return out; } +typedef struct _TableStatsRequest__isset { + _TableStatsRequest__isset() : catName(false) {} + bool catName :1; +} _TableStatsRequest__isset; class TableStatsRequest { public: TableStatsRequest(const TableStatsRequest&); TableStatsRequest& operator=(const TableStatsRequest&); - TableStatsRequest() : dbName(), tblName() { + TableStatsRequest() : dbName(), tblName(), catName() { } virtual ~TableStatsRequest() throw(); std::string dbName; std::string tblName; std::vector colNames; + std::string catName; + + _TableStatsRequest__isset __isset; void __set_dbName(const std::string& val); @@ -5214,6 +5673,8 @@ class TableStatsRequest { void __set_colNames(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const TableStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5222,6 +5683,10 @@ class TableStatsRequest { return false; if (!(colNames == rhs.colNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const TableStatsRequest &rhs) const { @@ -5244,13 +5709,17 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj) return out; } +typedef struct _PartitionsStatsRequest__isset { + _PartitionsStatsRequest__isset() : catName(false) {} + bool catName :1; +} _PartitionsStatsRequest__isset; class PartitionsStatsRequest { public: PartitionsStatsRequest(const PartitionsStatsRequest&); PartitionsStatsRequest& operator=(const PartitionsStatsRequest&); - PartitionsStatsRequest() : dbName(), tblName() { + PartitionsStatsRequest() : dbName(), tblName(), catName() { } virtual ~PartitionsStatsRequest() throw(); @@ -5258,6 +5727,9 @@ class PartitionsStatsRequest { std::string tblName; std::vector colNames; std::vector partNames; + std::string catName; + + _PartitionsStatsRequest__isset __isset; void __set_dbName(const std::string& val); @@ -5267,6 +5739,8 @@ class PartitionsStatsRequest { void __set_partNames(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const PartitionsStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5277,6 +5751,10 @@ class PartitionsStatsRequest { return false; if (!(partNames == rhs.partNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionsStatsRequest &rhs) const { @@ -5348,8 +5826,9 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob } typedef struct _AddPartitionsRequest__isset { - _AddPartitionsRequest__isset() : needResult(true) {} + _AddPartitionsRequest__isset() : needResult(true), catName(false) {} bool needResult :1; + bool catName :1; } _AddPartitionsRequest__isset; class AddPartitionsRequest { @@ -5357,7 +5836,7 @@ class AddPartitionsRequest { AddPartitionsRequest(const AddPartitionsRequest&); AddPartitionsRequest& operator=(const AddPartitionsRequest&); - AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true) { + AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() { } virtual ~AddPartitionsRequest() throw(); @@ -5366,6 
+5845,7 @@ class AddPartitionsRequest { std::vector parts; bool ifNotExists; bool needResult; + std::string catName; _AddPartitionsRequest__isset __isset; @@ -5379,6 +5859,8 @@ class AddPartitionsRequest { void __set_needResult(const bool val); + void __set_catName(const std::string& val); + bool operator == (const AddPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5393,6 +5875,10 @@ class AddPartitionsRequest { return false; else if (__isset.needResult && !(needResult == rhs.needResult)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AddPartitionsRequest &rhs) const { @@ -5569,12 +6055,13 @@ inline std::ostream& operator<<(std::ostream& out, const RequestPartsSpec& obj) } typedef struct _DropPartitionsRequest__isset { - _DropPartitionsRequest__isset() : deleteData(false), ifExists(true), ignoreProtection(false), environmentContext(false), needResult(true) {} + _DropPartitionsRequest__isset() : deleteData(false), ifExists(true), ignoreProtection(false), environmentContext(false), needResult(true), catName(false) {} bool deleteData :1; bool ifExists :1; bool ignoreProtection :1; bool environmentContext :1; bool needResult :1; + bool catName :1; } _DropPartitionsRequest__isset; class DropPartitionsRequest { @@ -5582,7 +6069,7 @@ class DropPartitionsRequest { DropPartitionsRequest(const DropPartitionsRequest&); DropPartitionsRequest& operator=(const DropPartitionsRequest&); - DropPartitionsRequest() : dbName(), tblName(), deleteData(0), ifExists(true), ignoreProtection(0), needResult(true) { + DropPartitionsRequest() : dbName(), tblName(), deleteData(0), ifExists(true), ignoreProtection(0), needResult(true), catName() { } virtual ~DropPartitionsRequest() throw(); @@ -5594,6 +6081,7 @@ class DropPartitionsRequest { bool ignoreProtection; EnvironmentContext environmentContext; bool needResult; + std::string catName; _DropPartitionsRequest__isset __isset; @@ -5613,6 +6101,8 @@ class DropPartitionsRequest { void __set_needResult(const bool val); + void __set_catName(const std::string& val); + bool operator == (const DropPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5641,6 +6131,10 @@ class DropPartitionsRequest { return false; else if (__isset.needResult && !(needResult == rhs.needResult)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const DropPartitionsRequest &rhs) const { @@ -5664,12 +6158,13 @@ inline std::ostream& operator<<(std::ostream& out, const DropPartitionsRequest& } typedef struct _PartitionValuesRequest__isset { - _PartitionValuesRequest__isset() : applyDistinct(true), filter(false), partitionOrder(false), ascending(true), maxParts(true) {} + _PartitionValuesRequest__isset() : applyDistinct(true), filter(false), partitionOrder(false), ascending(true), maxParts(true), catName(false) {} bool applyDistinct :1; bool filter :1; bool partitionOrder :1; bool ascending :1; bool maxParts :1; + bool catName :1; } _PartitionValuesRequest__isset; class PartitionValuesRequest { @@ -5677,7 +6172,7 @@ class PartitionValuesRequest { PartitionValuesRequest(const PartitionValuesRequest&); PartitionValuesRequest& operator=(const PartitionValuesRequest&); - PartitionValuesRequest() : dbName(), tblName(), applyDistinct(true), filter(), ascending(true), maxParts(-1LL) { + 
PartitionValuesRequest() : dbName(), tblName(), applyDistinct(true), filter(), ascending(true), maxParts(-1LL), catName() { } virtual ~PartitionValuesRequest() throw(); @@ -5689,6 +6184,7 @@ class PartitionValuesRequest { std::vector partitionOrder; bool ascending; int64_t maxParts; + std::string catName; _PartitionValuesRequest__isset __isset; @@ -5708,6 +6204,8 @@ class PartitionValuesRequest { void __set_maxParts(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const PartitionValuesRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5736,6 +6234,10 @@ class PartitionValuesRequest { return false; else if (__isset.maxParts && !(maxParts == rhs.maxParts)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionValuesRequest &rhs) const { @@ -5891,7 +6393,7 @@ inline std::ostream& operator<<(std::ostream& out, const ResourceUri& obj) } typedef struct _Function__isset { - _Function__isset() : functionName(false), dbName(false), className(false), ownerName(false), ownerType(false), createTime(false), functionType(false), resourceUris(false) {} + _Function__isset() : functionName(false), dbName(false), className(false), ownerName(false), ownerType(false), createTime(false), functionType(false), resourceUris(false), catName(false) {} bool functionName :1; bool dbName :1; bool className :1; @@ -5900,6 +6402,7 @@ typedef struct _Function__isset { bool createTime :1; bool functionType :1; bool resourceUris :1; + bool catName :1; } _Function__isset; class Function { @@ -5907,7 +6410,7 @@ class Function { Function(const Function&); Function& operator=(const Function&); - Function() : functionName(), dbName(), className(), ownerName(), ownerType((PrincipalType::type)0), createTime(0), functionType((FunctionType::type)0) { + Function() : functionName(), dbName(), className(), ownerName(), ownerType((PrincipalType::type)0), createTime(0), functionType((FunctionType::type)0), catName() { } virtual ~Function() throw(); @@ -5919,6 +6422,7 @@ class Function { int32_t createTime; FunctionType::type functionType; std::vector resourceUris; + std::string catName; _Function__isset __isset; @@ -5938,6 +6442,8 @@ class Function { void __set_resourceUris(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const Function & rhs) const { if (!(functionName == rhs.functionName)) @@ -5956,6 +6462,10 @@ class Function { return false; if (!(resourceUris == rhs.resourceUris)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Function &rhs) const { @@ -7928,10 +8438,11 @@ class CreationMetadata { CreationMetadata(const CreationMetadata&); CreationMetadata& operator=(const CreationMetadata&); - CreationMetadata() : dbName(), tblName(), validTxnList() { + CreationMetadata() : catName(), dbName(), tblName(), validTxnList() { } virtual ~CreationMetadata() throw(); + std::string catName; std::string dbName; std::string tblName; std::set tablesUsed; @@ -7939,6 +8450,8 @@ class CreationMetadata { _CreationMetadata__isset __isset; + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_tblName(const std::string& val); @@ -7949,6 +8462,8 @@ class CreationMetadata { bool operator == (const CreationMetadata & rhs) const { + if 
(!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(tblName == rhs.tblName)) @@ -8035,10 +8550,11 @@ inline std::ostream& operator<<(std::ostream& out, const NotificationEventReques } typedef struct _NotificationEvent__isset { - _NotificationEvent__isset() : dbName(false), tableName(false), messageFormat(false) {} + _NotificationEvent__isset() : dbName(false), tableName(false), messageFormat(false), catName(false) {} bool dbName :1; bool tableName :1; bool messageFormat :1; + bool catName :1; } _NotificationEvent__isset; class NotificationEvent { @@ -8046,7 +8562,7 @@ class NotificationEvent { NotificationEvent(const NotificationEvent&); NotificationEvent& operator=(const NotificationEvent&); - NotificationEvent() : eventId(0), eventTime(0), eventType(), dbName(), tableName(), message(), messageFormat() { + NotificationEvent() : eventId(0), eventTime(0), eventType(), dbName(), tableName(), message(), messageFormat(), catName() { } virtual ~NotificationEvent() throw(); @@ -8057,6 +8573,7 @@ class NotificationEvent { std::string tableName; std::string message; std::string messageFormat; + std::string catName; _NotificationEvent__isset __isset; @@ -8074,6 +8591,8 @@ class NotificationEvent { void __set_messageFormat(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const NotificationEvent & rhs) const { if (!(eventId == rhs.eventId)) @@ -8096,6 +8615,10 @@ class NotificationEvent { return false; else if (__isset.messageFormat && !(messageFormat == rhs.messageFormat)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const NotificationEvent &rhs) const { @@ -8198,29 +8721,42 @@ inline std::ostream& operator<<(std::ostream& out, const CurrentNotificationEven return out; } +typedef struct _NotificationEventsCountRequest__isset { + _NotificationEventsCountRequest__isset() : catName(false) {} + bool catName :1; +} _NotificationEventsCountRequest__isset; class NotificationEventsCountRequest { public: NotificationEventsCountRequest(const NotificationEventsCountRequest&); NotificationEventsCountRequest& operator=(const NotificationEventsCountRequest&); - NotificationEventsCountRequest() : fromEventId(0), dbName() { + NotificationEventsCountRequest() : fromEventId(0), dbName(), catName() { } virtual ~NotificationEventsCountRequest() throw(); int64_t fromEventId; std::string dbName; + std::string catName; + + _NotificationEventsCountRequest__isset __isset; void __set_fromEventId(const int64_t val); void __set_dbName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const NotificationEventsCountRequest & rhs) const { if (!(fromEventId == rhs.fromEventId)) return false; if (!(dbName == rhs.dbName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const NotificationEventsCountRequest &rhs) const { @@ -8391,10 +8927,11 @@ inline std::ostream& operator<<(std::ostream& out, const FireEventRequestData& o } typedef struct _FireEventRequest__isset { - _FireEventRequest__isset() : dbName(false), tableName(false), partitionVals(false) {} + _FireEventRequest__isset() : dbName(false), tableName(false), partitionVals(false), catName(false) {} bool dbName :1; bool tableName :1; bool partitionVals :1; + bool catName :1; } 
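CreationMetadata, NotificationEvent and NotificationEventsCountRequest picking up catName means change-notification consumers can now scope an event stream to a single catalog. On the read side a consumer still has to allow for events written before regeneration, where the field is simply absent; one defensive way to normalize (treating unset as the default catalog, whose name "hive" is an assumption here rather than something these hunks state):

    // Sketch of consumer-side normalization for the new optional field;
    // "hive" as the default catalog name is assumed, not taken from this diff.
    String catalog = event.isSetCatName() ? event.getCatName() : "hive";
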
_FireEventRequest__isset; class FireEventRequest { @@ -8402,7 +8939,7 @@ class FireEventRequest { FireEventRequest(const FireEventRequest&); FireEventRequest& operator=(const FireEventRequest&); - FireEventRequest() : successful(0), dbName(), tableName() { + FireEventRequest() : successful(0), dbName(), tableName(), catName() { } virtual ~FireEventRequest() throw(); @@ -8411,6 +8948,7 @@ class FireEventRequest { std::string dbName; std::string tableName; std::vector partitionVals; + std::string catName; _FireEventRequest__isset __isset; @@ -8424,6 +8962,8 @@ class FireEventRequest { void __set_partitionVals(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const FireEventRequest & rhs) const { if (!(successful == rhs.successful)) @@ -8442,6 +8982,10 @@ class FireEventRequest { return false; else if (__isset.partitionVals && !(partitionVals == rhs.partitionVals)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const FireEventRequest &rhs) const { @@ -9114,8 +9658,9 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj } typedef struct _GetTableRequest__isset { - _GetTableRequest__isset() : capabilities(false) {} + _GetTableRequest__isset() : capabilities(false), catName(false) {} bool capabilities :1; + bool catName :1; } _GetTableRequest__isset; class GetTableRequest { @@ -9123,13 +9668,14 @@ class GetTableRequest { GetTableRequest(const GetTableRequest&); GetTableRequest& operator=(const GetTableRequest&); - GetTableRequest() : dbName(), tblName() { + GetTableRequest() : dbName(), tblName(), catName() { } virtual ~GetTableRequest() throw(); std::string dbName; std::string tblName; ClientCapabilities capabilities; + std::string catName; _GetTableRequest__isset __isset; @@ -9139,6 +9685,8 @@ class GetTableRequest { void __set_capabilities(const ClientCapabilities& val); + void __set_catName(const std::string& val); + bool operator == (const GetTableRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -9149,6 +9697,10 @@ class GetTableRequest { return false; else if (__isset.capabilities && !(capabilities == rhs.capabilities)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const GetTableRequest &rhs) const { @@ -9212,9 +9764,10 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableResult& obj) } typedef struct _GetTablesRequest__isset { - _GetTablesRequest__isset() : tblNames(false), capabilities(false) {} + _GetTablesRequest__isset() : tblNames(false), capabilities(false), catName(false) {} bool tblNames :1; bool capabilities :1; + bool catName :1; } _GetTablesRequest__isset; class GetTablesRequest { @@ -9222,13 +9775,14 @@ class GetTablesRequest { GetTablesRequest(const GetTablesRequest&); GetTablesRequest& operator=(const GetTablesRequest&); - GetTablesRequest() : dbName() { + GetTablesRequest() : dbName(), catName() { } virtual ~GetTablesRequest() throw(); std::string dbName; std::vector tblNames; ClientCapabilities capabilities; + std::string catName; _GetTablesRequest__isset __isset; @@ -9238,6 +9792,8 @@ class GetTablesRequest { void __set_capabilities(const ClientCapabilities& val); + void __set_catName(const std::string& val); + bool operator == (const GetTablesRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -9250,6 +9806,10 
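With FireEventRequest and GetTableRequest extended the same way, directing a metastore call at a particular catalog becomes a single extra setter on the request object. A hypothetical lookup (the two-argument constructor covers the required dbName/tblName fields; "spark_cat" is an invented catalog name):

    GetTableRequest req = new GetTableRequest("web", "clicks");
    req.setCatName("spark_cat");      // route the lookup to a non-default catalog
    // Leaving catName unset preserves the old single-catalog behavior.
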
@@ class GetTablesRequest { return false; else if (__isset.capabilities && !(capabilities == rhs.capabilities)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const GetTablesRequest &rhs) const { @@ -9393,8 +9953,9 @@ inline std::ostream& operator<<(std::ostream& out, const CmRecycleResponse& obj) } typedef struct _TableMeta__isset { - _TableMeta__isset() : comments(false) {} + _TableMeta__isset() : comments(false), catName(false) {} bool comments :1; + bool catName :1; } _TableMeta__isset; class TableMeta { @@ -9402,7 +9963,7 @@ class TableMeta { TableMeta(const TableMeta&); TableMeta& operator=(const TableMeta&); - TableMeta() : dbName(), tableName(), tableType(), comments() { + TableMeta() : dbName(), tableName(), tableType(), comments(), catName() { } virtual ~TableMeta() throw(); @@ -9410,6 +9971,7 @@ class TableMeta { std::string tableName; std::string tableType; std::string comments; + std::string catName; _TableMeta__isset __isset; @@ -9421,6 +9983,8 @@ class TableMeta { void __set_comments(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const TableMeta & rhs) const { if (!(dbName == rhs.dbName)) @@ -9433,6 +9997,10 @@ class TableMeta { return false; else if (__isset.comments && !(comments == rhs.comments)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const TableMeta &rhs) const { @@ -11655,9 +12223,10 @@ inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerTo } typedef struct _ISchema__isset { - _ISchema__isset() : schemaType(false), name(false), dbName(false), compatibility(false), validationLevel(false), canEvolve(false), schemaGroup(false), description(false) {} + _ISchema__isset() : schemaType(false), name(false), catName(false), dbName(false), compatibility(false), validationLevel(false), canEvolve(false), schemaGroup(false), description(false) {} bool schemaType :1; bool name :1; + bool catName :1; bool dbName :1; bool compatibility :1; bool validationLevel :1; @@ -11671,12 +12240,13 @@ class ISchema { ISchema(const ISchema&); ISchema& operator=(const ISchema&); - ISchema() : schemaType((SchemaType::type)0), name(), dbName(), compatibility((SchemaCompatibility::type)0), validationLevel((SchemaValidation::type)0), canEvolve(0), schemaGroup(), description() { + ISchema() : schemaType((SchemaType::type)0), name(), catName(), dbName(), compatibility((SchemaCompatibility::type)0), validationLevel((SchemaValidation::type)0), canEvolve(0), schemaGroup(), description() { } virtual ~ISchema() throw(); SchemaType::type schemaType; std::string name; + std::string catName; std::string dbName; SchemaCompatibility::type compatibility; SchemaValidation::type validationLevel; @@ -11690,6 +12260,8 @@ class ISchema { void __set_name(const std::string& val); + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_compatibility(const SchemaCompatibility::type val); @@ -11708,6 +12280,8 @@ class ISchema { return false; if (!(name == rhs.name)) return false; + if (!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(compatibility == rhs.compatibility)) @@ -11747,7 +12321,8 @@ inline std::ostream& operator<<(std::ostream& out, const ISchema& obj) } typedef struct 
_ISchemaName__isset { - _ISchemaName__isset() : dbName(false), schemaName(false) {} + _ISchemaName__isset() : catName(false), dbName(false), schemaName(false) {} + bool catName :1; bool dbName :1; bool schemaName :1; } _ISchemaName__isset; @@ -11757,21 +12332,26 @@ class ISchemaName { ISchemaName(const ISchemaName&); ISchemaName& operator=(const ISchemaName&); - ISchemaName() : dbName(), schemaName() { + ISchemaName() : catName(), dbName(), schemaName() { } virtual ~ISchemaName() throw(); + std::string catName; std::string dbName; std::string schemaName; _ISchemaName__isset __isset; + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_schemaName(const std::string& val); bool operator == (const ISchemaName & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(schemaName == rhs.schemaName)) diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java index 911e98127b..24c68d8529 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list570 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list570.size); - long _elem571; - for (int _i572 = 0; _i572 < _list570.size; ++_i572) + org.apache.thrift.protocol.TList _list578 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list578.size); + long _elem579; + for (int _i580 = 0; _i580 < _list578.size; ++_i580) { - _elem571 = iprot.readI64(); - struct.txn_ids.add(_elem571); + _elem579 = iprot.readI64(); + struct.txn_ids.add(_elem579); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnsRequest s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter573 : struct.txn_ids) + for (long _iter581 : struct.txn_ids) { - oprot.writeI64(_iter573); + oprot.writeI64(_iter581); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter574 : struct.txn_ids) + for (long _iter582 : struct.txn_ids) { - oprot.writeI64(_iter574); + oprot.writeI64(_iter582); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list575 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list575.size); - long _elem576; - for (int _i577 = 0; _i577 < _list575.size; ++_i577) + org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + 
struct.txn_ids = new ArrayList(_list583.size); + long _elem584; + for (int _i585 = 0; _i585 < _list583.size; ++_i585) { - _elem576 = iprot.readI64(); - struct.txn_ids.add(_elem576); + _elem584 = iprot.readI64(); + struct.txn_ids.add(_elem584); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java index 8a37ee4bbd..02d552d0ca 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddCheckConstraintR case 1: // CHECK_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); - struct.checkConstraintCols = new ArrayList(_list400.size); - SQLCheckConstraint _elem401; - for (int _i402 = 0; _i402 < _list400.size; ++_i402) + org.apache.thrift.protocol.TList _list408 = iprot.readListBegin(); + struct.checkConstraintCols = new ArrayList(_list408.size); + SQLCheckConstraint _elem409; + for (int _i410 = 0; _i410 < _list408.size; ++_i410) { - _elem401 = new SQLCheckConstraint(); - _elem401.read(iprot); - struct.checkConstraintCols.add(_elem401); + _elem409 = new SQLCheckConstraint(); + _elem409.read(iprot); + struct.checkConstraintCols.add(_elem409); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddCheckConstraint oprot.writeFieldBegin(CHECK_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraintCols.size())); - for (SQLCheckConstraint _iter403 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter411 : struct.checkConstraintCols) { - _iter403.write(oprot); + _iter411.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraintCols.size()); - for (SQLCheckConstraint _iter404 : struct.checkConstraintCols) + for (SQLCheckConstraint _iter412 : struct.checkConstraintCols) { - _iter404.write(oprot); + _iter412.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintR public void read(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraintCols = new ArrayList(_list405.size); - SQLCheckConstraint _elem406; - for (int _i407 = 0; _i407 < _list405.size; ++_i407) + org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraintCols = new ArrayList(_list413.size); + SQLCheckConstraint _elem414; + for (int _i415 = 0; _i415 < _list413.size; ++_i415) { - _elem406 = new SQLCheckConstraint(); - _elem406.read(iprot); - struct.checkConstraintCols.add(_elem406); + _elem414 = new 
SQLCheckConstraint(); + _elem414.read(iprot); + struct.checkConstraintCols.add(_elem414); } } struct.setCheckConstraintColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java index b4b9cf251f..6acc6f818f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDefaultConstrain case 1: // DEFAULT_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); - struct.defaultConstraintCols = new ArrayList(_list392.size); - SQLDefaultConstraint _elem393; - for (int _i394 = 0; _i394 < _list392.size; ++_i394) + org.apache.thrift.protocol.TList _list400 = iprot.readListBegin(); + struct.defaultConstraintCols = new ArrayList(_list400.size); + SQLDefaultConstraint _elem401; + for (int _i402 = 0; _i402 < _list400.size; ++_i402) { - _elem393 = new SQLDefaultConstraint(); - _elem393.read(iprot); - struct.defaultConstraintCols.add(_elem393); + _elem401 = new SQLDefaultConstraint(); + _elem401.read(iprot); + struct.defaultConstraintCols.add(_elem401); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDefaultConstrai oprot.writeFieldBegin(DEFAULT_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraintCols.size())); - for (SQLDefaultConstraint _iter395 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter403 : struct.defaultConstraintCols) { - _iter395.write(oprot); + _iter403.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraintCols.size()); - for (SQLDefaultConstraint _iter396 : struct.defaultConstraintCols) + for (SQLDefaultConstraint _iter404 : struct.defaultConstraintCols) { - _iter396.write(oprot); + _iter404.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraintCols = new ArrayList(_list397.size); - SQLDefaultConstraint _elem398; - for (int _i399 = 0; _i399 < _list397.size; ++_i399) + org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraintCols = new ArrayList(_list405.size); + SQLDefaultConstraint _elem406; + for (int _i407 = 0; _i407 < _list405.size; ++_i407) { - _elem398 = new SQLDefaultConstraint(); - _elem398.read(iprot); - struct.defaultConstraintCols.add(_elem398); + _elem406 = new SQLDefaultConstraint(); + _elem406.read(iprot); + 
struct.defaultConstraintCols.add(_elem406); } } struct.setDefaultConstraintColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java index 374cdc382c..d6a071ac79 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java @@ -816,13 +816,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition case 5: // PARTITIONNAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list668 = iprot.readListBegin(); - struct.partitionnames = new ArrayList(_list668.size); - String _elem669; - for (int _i670 = 0; _i670 < _list668.size; ++_i670) + org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); + struct.partitionnames = new ArrayList(_list676.size); + String _elem677; + for (int _i678 = 0; _i678 < _list676.size; ++_i678) { - _elem669 = iprot.readString(); - struct.partitionnames.add(_elem669); + _elem677 = iprot.readString(); + struct.partitionnames.add(_elem677); } iprot.readListEnd(); } @@ -872,9 +872,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size())); - for (String _iter671 : struct.partitionnames) + for (String _iter679 : struct.partitionnames) { - oprot.writeString(_iter671); + oprot.writeString(_iter679); } oprot.writeListEnd(); } @@ -910,9 +910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition oprot.writeString(struct.tablename); { oprot.writeI32(struct.partitionnames.size()); - for (String _iter672 : struct.partitionnames) + for (String _iter680 : struct.partitionnames) { - oprot.writeString(_iter672); + oprot.writeString(_iter680); } } BitSet optionals = new BitSet(); @@ -937,13 +937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list673.size); - String _elem674; - for (int _i675 = 0; _i675 < _list673.size; ++_i675) + org.apache.thrift.protocol.TList _list681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list681.size); + String _elem682; + for (int _i683 = 0; _i683 < _list681.size; ++_i683) { - _elem674 = iprot.readString(); - struct.partitionnames.add(_elem674); + _elem682 = iprot.readString(); + struct.partitionnames.add(_elem682); } } struct.setPartitionnamesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java index 9a2087ca59..f57eb3bdae 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java +++ 
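From AbortTxnsRequest onward, most of the Java hunks change no behavior at all: the thrift compiler numbers its synthetic temporaries (_listNNN, _elemNNN, _iNNN, _iterNNN) from one counter that runs across the whole generated file, so the structs and fields added earlier shift every later suffix by a constant (570 -> 578, 400 -> 408, 668 -> 676, and so on). Each such hunk is an instance of the stock generated list-read loop; in isolation, with real libthrift types, it looks like this:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    // The shape every renumbered read hunk follows; between revisions only
    // the synthetic variable suffixes differ.
    static List<Long> readI64List(TProtocol iprot) throws TException {
      TList header = iprot.readListBegin();   // element type + element count
      List<Long> vals = new ArrayList<Long>(header.size);
      for (int i = 0; i < header.size; ++i) {
        vals.add(iprot.readI64());
      }
      iprot.readListEnd();
      return vals;
    }
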
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddForeignKeyReques case 1: // FOREIGN_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list368 = iprot.readListBegin(); - struct.foreignKeyCols = new ArrayList(_list368.size); - SQLForeignKey _elem369; - for (int _i370 = 0; _i370 < _list368.size; ++_i370) + org.apache.thrift.protocol.TList _list376 = iprot.readListBegin(); + struct.foreignKeyCols = new ArrayList(_list376.size); + SQLForeignKey _elem377; + for (int _i378 = 0; _i378 < _list376.size; ++_i378) { - _elem369 = new SQLForeignKey(); - _elem369.read(iprot); - struct.foreignKeyCols.add(_elem369); + _elem377 = new SQLForeignKey(); + _elem377.read(iprot); + struct.foreignKeyCols.add(_elem377); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddForeignKeyReque oprot.writeFieldBegin(FOREIGN_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeyCols.size())); - for (SQLForeignKey _iter371 : struct.foreignKeyCols) + for (SQLForeignKey _iter379 : struct.foreignKeyCols) { - _iter371.write(oprot); + _iter379.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeyCols.size()); - for (SQLForeignKey _iter372 : struct.foreignKeyCols) + for (SQLForeignKey _iter380 : struct.foreignKeyCols) { - _iter372.write(oprot); + _iter380.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeyCols = new ArrayList(_list373.size); - SQLForeignKey _elem374; - for (int _i375 = 0; _i375 < _list373.size; ++_i375) + org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeyCols = new ArrayList(_list381.size); + SQLForeignKey _elem382; + for (int _i383 = 0; _i383 < _list381.size; ++_i383) { - _elem374 = new SQLForeignKey(); - _elem374.read(iprot); - struct.foreignKeyCols.add(_elem374); + _elem382 = new SQLForeignKey(); + _elem382.read(iprot); + struct.foreignKeyCols.add(_elem382); } } struct.setForeignKeyColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java index d3d771cdfa..e6bac16782 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddNotNullConstrain case 1: // NOT_NULL_CONSTRAINT_COLS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); - struct.notNullConstraintCols = new ArrayList(_list384.size); - SQLNotNullConstraint _elem385; - for (int _i386 = 0; _i386 < _list384.size; ++_i386) + org.apache.thrift.protocol.TList _list392 = iprot.readListBegin(); + struct.notNullConstraintCols = new ArrayList(_list392.size); + SQLNotNullConstraint _elem393; + for (int _i394 = 0; _i394 < _list392.size; ++_i394) { - _elem385 = new SQLNotNullConstraint(); - _elem385.read(iprot); - struct.notNullConstraintCols.add(_elem385); + _elem393 = new SQLNotNullConstraint(); + _elem393.read(iprot); + struct.notNullConstraintCols.add(_elem393); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddNotNullConstrai oprot.writeFieldBegin(NOT_NULL_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraintCols.size())); - for (SQLNotNullConstraint _iter387 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter395 : struct.notNullConstraintCols) { - _iter387.write(oprot); + _iter395.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraintCols.size()); - for (SQLNotNullConstraint _iter388 : struct.notNullConstraintCols) + for (SQLNotNullConstraint _iter396 : struct.notNullConstraintCols) { - _iter388.write(oprot); + _iter396.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstrain public void read(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraintCols = new ArrayList(_list389.size); - SQLNotNullConstraint _elem390; - for (int _i391 = 0; _i391 < _list389.size; ++_i391) + org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraintCols = new ArrayList(_list397.size); + SQLNotNullConstraint _elem398; + for (int _i399 = 0; _i399 < _list397.size; ++_i399) { - _elem390 = new SQLNotNullConstraint(); - _elem390.read(iprot); - struct.notNullConstraintCols.add(_elem390); + _elem398 = new SQLNotNullConstraint(); + _elem398.read(iprot); + struct.notNullConstraintCols.add(_elem398); } } struct.setNotNullConstraintColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 1e6f973f46..dd3a127013 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("parts", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField 
IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List parts; // required private boolean ifNotExists; // required private boolean needResult; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TBL_NAME((short)2, "tblName"), PARTS((short)3, "parts"), IF_NOT_EXISTS((short)4, "ifNotExists"), - NEED_RESULT((short)5, "needResult"); + NEED_RESULT((short)5, "needResult"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return IF_NOT_EXISTS; case 5: // NEED_RESULT return NEED_RESULT; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -130,7 +135,7 @@ public String getFieldName() { private static final int __IFNOTEXISTS_ISSET_ID = 0; private static final int __NEEDRESULT_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_RESULT}; + private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -145,6 +150,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap); } @@ -188,6 +195,9 @@ public AddPartitionsRequest(AddPartitionsRequest other) { } this.ifNotExists = other.ifNotExists; this.needResult = other.needResult; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AddPartitionsRequest deepCopy() { @@ -203,6 +213,7 @@ public void clear() { this.ifNotExists = false; this.needResult = true; + this.catName = null; } public String getDbName() { @@ -333,6 +344,29 @@ public void setNeedResultIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field 
catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -375,6 +409,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -395,6 +437,9 @@ public Object getFieldValue(_Fields field) { case NEED_RESULT: return isNeedResult(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -416,6 +461,8 @@ public boolean isSet(_Fields field) { return isSetIfNotExists(); case NEED_RESULT: return isSetNeedResult(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -478,6 +525,15 @@ public boolean equals(AddPartitionsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -510,6 +566,11 @@ public int hashCode() { if (present_needResult) list.add(needResult); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -571,6 +632,16 @@ public int compareTo(AddPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -624,6 +695,16 @@ public String toString() { sb.append(this.needResult); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -704,14 +785,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list474 = iprot.readListBegin(); - struct.parts = new ArrayList(_list474.size); - Partition _elem475; - for (int _i476 = 0; _i476 < _list474.size; ++_i476) + org.apache.thrift.protocol.TList _list482 = iprot.readListBegin(); + struct.parts = new ArrayList(_list482.size); + Partition _elem483; + for (int _i484 = 0; _i484 < _list482.size; ++_i484) { - _elem475 = new Partition(); - _elem475.read(iprot); - struct.parts.add(_elem475); + _elem483 = new Partition(); + _elem483.read(iprot); + struct.parts.add(_elem483); } iprot.readListEnd(); } @@ -736,6 +817,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; 
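Because the accessor quartet above (setCatName, unsetCatName, isSetCatName, plus the getter) feeds into equals(), hashCode(), compareTo() and toString() below, catName presence becomes part of request identity:

    // Two AddPartitionsRequests that differ only in whether catName is set
    // are no longer equal; hashCode() folds the field in as well.
    AddPartitionsRequest a = new AddPartitionsRequest();
    AddPartitionsRequest b = new AddPartitionsRequest();
    b.setCatName("hive");
    assert !a.equals(b);
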
default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -763,9 +852,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter477 : struct.parts) + for (Partition _iter485 : struct.parts) { - _iter477.write(oprot); + _iter485.write(oprot); } oprot.writeListEnd(); } @@ -779,6 +868,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeBool(struct.needResult); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -800,9 +896,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter478 : struct.parts) + for (Partition _iter486 : struct.parts) { - _iter478.write(oprot); + _iter486.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -810,10 +906,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques if (struct.isSetNeedResult()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -824,24 +926,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list479.size); - Partition _elem480; - for (int _i481 = 0; _i481 < _list479.size; ++_i481) + org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list487.size); + Partition _elem488; + for (int _i489 = 0; _i489 < _list487.size; ++_i489) { - _elem480 = new Partition(); - _elem480.read(iprot); - struct.parts.add(_elem480); + _elem488 = new Partition(); + _elem488.read(iprot); + struct.parts.add(_elem488); } } struct.setPartsIsSet(true); struct.ifNotExists = iprot.readBool(); struct.setIfNotExistsIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index fb21b45183..fe41b8c711 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 
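In the TupleScheme, the presence of optionals travels as an explicit bitset, which this change widens from one bit to two (bit 0 = needResult, bit 1 = catName); writer and reader must agree on that count, which is why the tuple encoding only stays safe when both endpoints are regenerated from the same IDL. The symmetric pair, condensed from the generated code above (field access as inside the generated scheme class):

    // Writer side:
    BitSet optionals = new BitSet();
    if (struct.isSetNeedResult()) optionals.set(0);
    if (struct.isSetCatName())    optionals.set(1);
    oprot.writeBitSet(optionals, 2);
    if (struct.isSetNeedResult()) oprot.writeBool(struct.needResult);
    if (struct.isSetCatName())    oprot.writeString(struct.catName);

    // Reader side, consuming in the same order:
    BitSet incoming = iprot.readBitSet(2);
    if (incoming.get(0)) { struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); }
    if (incoming.get(1)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); }
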
1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list466 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list466.size); - Partition _elem467; - for (int _i468 = 0; _i468 < _list466.size; ++_i468) + org.apache.thrift.protocol.TList _list474 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list474.size); + Partition _elem475; + for (int _i476 = 0; _i476 < _list474.size; ++_i476) { - _elem467 = new Partition(); - _elem467.read(iprot); - struct.partitions.add(_elem467); + _elem475 = new Partition(); + _elem475.read(iprot); + struct.partitions.add(_elem475); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter469 : struct.partitions) + for (Partition _iter477 : struct.partitions) { - _iter469.write(oprot); + _iter477.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter470 : struct.partitions) + for (Partition _iter478 : struct.partitions) { - _iter470.write(oprot); + _iter478.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list471.size); - Partition _elem472; - for (int _i473 = 0; _i473 < _list471.size; ++_i473) + org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list479.size); + Partition _elem480; + for (int _i481 = 0; _i481 < _list479.size; ++_i481) { - _elem472 = new Partition(); - _elem472.read(iprot); - struct.partitions.add(_elem472); + _elem480 = new Partition(); + _elem480.read(iprot); + struct.partitions.add(_elem480); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java index 79c79302d8..39bb6be891 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPrimaryKeyReques case 1: // PRIMARY_KEY_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list360 = iprot.readListBegin(); - struct.primaryKeyCols = new ArrayList(_list360.size); - SQLPrimaryKey _elem361; - for (int _i362 = 0; _i362 < _list360.size; ++_i362) + org.apache.thrift.protocol.TList _list368 = iprot.readListBegin(); + struct.primaryKeyCols = new ArrayList(_list368.size); + SQLPrimaryKey _elem369; + for (int _i370 = 0; _i370 < _list368.size; ++_i370) { - _elem361 = new SQLPrimaryKey(); - _elem361.read(iprot); 
- struct.primaryKeyCols.add(_elem361); + _elem369 = new SQLPrimaryKey(); + _elem369.read(iprot); + struct.primaryKeyCols.add(_elem369); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPrimaryKeyReque oprot.writeFieldBegin(PRIMARY_KEY_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeyCols.size())); - for (SQLPrimaryKey _iter363 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter371 : struct.primaryKeyCols) { - _iter363.write(oprot); + _iter371.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeyCols.size()); - for (SQLPrimaryKey _iter364 : struct.primaryKeyCols) + for (SQLPrimaryKey _iter372 : struct.primaryKeyCols) { - _iter364.write(oprot); + _iter372.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyReques public void read(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeyCols = new ArrayList(_list365.size); - SQLPrimaryKey _elem366; - for (int _i367 = 0; _i367 < _list365.size; ++_i367) + org.apache.thrift.protocol.TList _list373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeyCols = new ArrayList(_list373.size); + SQLPrimaryKey _elem374; + for (int _i375 = 0; _i375 < _list373.size; ++_i375) { - _elem366 = new SQLPrimaryKey(); - _elem366.read(iprot); - struct.primaryKeyCols.add(_elem366); + _elem374 = new SQLPrimaryKey(); + _elem374.read(iprot); + struct.primaryKeyCols.add(_elem374); } } struct.setPrimaryKeyColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java index 0cfee8a51f..bcb1e6b847 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddUniqueConstraint case 1: // UNIQUE_CONSTRAINT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list376 = iprot.readListBegin(); - struct.uniqueConstraintCols = new ArrayList(_list376.size); - SQLUniqueConstraint _elem377; - for (int _i378 = 0; _i378 < _list376.size; ++_i378) + org.apache.thrift.protocol.TList _list384 = iprot.readListBegin(); + struct.uniqueConstraintCols = new ArrayList(_list384.size); + SQLUniqueConstraint _elem385; + for (int _i386 = 0; _i386 < _list384.size; ++_i386) { - _elem377 = new SQLUniqueConstraint(); - _elem377.read(iprot); - struct.uniqueConstraintCols.add(_elem377); + _elem385 = new SQLUniqueConstraint(); + _elem385.read(iprot); + struct.uniqueConstraintCols.add(_elem385); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, AddUniqueConstrain oprot.writeFieldBegin(UNIQUE_CONSTRAINT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraintCols.size())); - for (SQLUniqueConstraint _iter379 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter387 : struct.uniqueConstraintCols) { - _iter379.write(oprot); + _iter387.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraintCols.size()); - for (SQLUniqueConstraint _iter380 : struct.uniqueConstraintCols) + for (SQLUniqueConstraint _iter388 : struct.uniqueConstraintCols) { - _iter380.write(oprot); + _iter388.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraint public void read(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraintCols = new ArrayList(_list381.size); - SQLUniqueConstraint _elem382; - for (int _i383 = 0; _i383 < _list381.size; ++_i383) + org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraintCols = new ArrayList(_list389.size); + SQLUniqueConstraint _elem390; + for (int _i391 = 0; _i391 < _list389.size; ++_i391) { - _elem382 = new SQLUniqueConstraint(); - _elem382.read(iprot); - struct.uniqueConstraintCols.add(_elem382); + _elem390 = new SQLUniqueConstraint(); + _elem390.read(iprot); + struct.uniqueConstraintCols.add(_elem390); } } struct.setUniqueConstraintColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index 718a6371ff..fff212dfd4 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -439,14 +439,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list268 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list268.size); - ColumnStatisticsObj _elem269; - for (int _i270 = 0; _i270 < _list268.size; ++_i270) + org.apache.thrift.protocol.TList _list276 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list276.size); + ColumnStatisticsObj _elem277; + for (int _i278 = 0; _i278 < _list276.size; ++_i278) { - _elem269 = new ColumnStatisticsObj(); - _elem269.read(iprot); - struct.colStats.add(_elem269); + _elem277 = new ColumnStatisticsObj(); + _elem277.read(iprot); + struct.colStats.add(_elem277); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatisticsObj _iter271 : 
struct.colStats) + for (ColumnStatisticsObj _iter279 : struct.colStats) { - _iter271.write(oprot); + _iter279.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatisticsObj _iter272 : struct.colStats) + for (ColumnStatisticsObj _iter280 : struct.colStats) { - _iter272.write(oprot); + _iter280.write(oprot); } } oprot.writeI64(struct.partsFound); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colStats = new ArrayList(_list273.size); - ColumnStatisticsObj _elem274; - for (int _i275 = 0; _i275 < _list273.size; ++_i275) + org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list281.size); + ColumnStatisticsObj _elem282; + for (int _i283 = 0; _i283 < _list281.size; ++_i283) { - _elem274 = new ColumnStatisticsObj(); - _elem274.read(iprot); - struct.colStats.add(_elem274); + _elem282 = new ColumnStatisticsObj(); + _elem282.read(iprot); + struct.colStats.add(_elem282); } } struct.setColStatsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java index bf9585493a..fd0d3c9771 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java @@ -521,13 +521,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list602 = iprot.readListBegin(); - struct.txnIds = new ArrayList(_list602.size); - long _elem603; - for (int _i604 = 0; _i604 < _list602.size; ++_i604) + org.apache.thrift.protocol.TList _list610 = iprot.readListBegin(); + struct.txnIds = new ArrayList(_list610.size); + long _elem611; + for (int _i612 = 0; _i612 < _list610.size; ++_i612) { - _elem603 = iprot.readI64(); - struct.txnIds.add(_elem603); + _elem611 = iprot.readI64(); + struct.txnIds.add(_elem611); } iprot.readListEnd(); } @@ -569,9 +569,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size())); - for (long _iter605 : struct.txnIds) + for (long _iter613 : struct.txnIds) { - oprot.writeI64(_iter605); + oprot.writeI64(_iter613); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnIds.size()); - for (long _iter606 : struct.txnIds) + for (long _iter614 : struct.txnIds) { - 
oprot.writeI64(_iter606); + oprot.writeI64(_iter614); } } oprot.writeString(struct.dbName); @@ -619,13 +619,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txnIds = new ArrayList(_list607.size); - long _elem608; - for (int _i609 = 0; _i609 < _list607.size; ++_i609) + org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txnIds = new ArrayList(_list615.size); + long _elem616; + for (int _i617 = 0; _i617 < _list615.size; ++_i617) { - _elem608 = iprot.readI64(); - struct.txnIds.add(_elem608); + _elem616 = iprot.readI64(); + struct.txnIds.add(_elem616); } } struct.setTxnIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java index 5ce8d51469..fb47073ad5 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteI case 1: // TXN_TO_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list610 = iprot.readListBegin(); - struct.txnToWriteIds = new ArrayList(_list610.size); - TxnToWriteId _elem611; - for (int _i612 = 0; _i612 < _list610.size; ++_i612) + org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); + struct.txnToWriteIds = new ArrayList(_list618.size); + TxnToWriteId _elem619; + for (int _i620 = 0; _i620 < _list618.size; ++_i620) { - _elem611 = new TxnToWriteId(); - _elem611.read(iprot); - struct.txnToWriteIds.add(_elem611); + _elem619 = new TxnToWriteId(); + _elem619.read(iprot); + struct.txnToWriteIds.add(_elem619); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWrite oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size())); - for (TxnToWriteId _iter613 : struct.txnToWriteIds) + for (TxnToWriteId _iter621 : struct.txnToWriteIds) { - _iter613.write(oprot); + _iter621.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txnToWriteIds.size()); - for (TxnToWriteId _iter614 : struct.txnToWriteIds) + for (TxnToWriteId _iter622 : struct.txnToWriteIds) { - _iter614.write(oprot); + _iter622.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteI public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - 
org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.txnToWriteIds = new ArrayList(_list615.size); - TxnToWriteId _elem616; - for (int _i617 = 0; _i617 < _list615.size; ++_i617) + org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.txnToWriteIds = new ArrayList(_list623.size); + TxnToWriteId _elem624; + for (int _i625 = 0; _i625 < _list623.size; ++_i625) { - _elem616 = new TxnToWriteId(); - _elem616.read(iprot); - struct.txnToWriteIds.add(_elem616); + _elem624 = new TxnToWriteId(); + _elem624.read(iprot); + struct.txnToWriteIds.add(_elem624); } } struct.setTxnToWriteIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java new file mode 100644 index 0000000000..3eb4dbd511 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java @@ -0,0 +1,606 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Catalog implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Catalog"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField LOCATION_URI_FIELD_DESC = new org.apache.thrift.protocol.TField("locationUri", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new CatalogStandardSchemeFactory()); + schemes.put(TupleScheme.class, new CatalogTupleSchemeFactory()); + } + + private String 
name; // required + private String description; // optional + private String locationUri; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"), + DESCRIPTION((short)2, "description"), + LOCATION_URI((short)3, "locationUri"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + case 2: // DESCRIPTION + return DESCRIPTION; + case 3: // LOCATION_URI + return LOCATION_URI; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.DESCRIPTION}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Catalog.class, metaDataMap); + } + + public Catalog() { + } + + public Catalog( + String name, + String locationUri) + { + this(); + this.name = name; + this.locationUri = locationUri; + } + + /** + * Performs a deep copy on other. 
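Usage sketch for the new Catalog bean (illustrative values only): name and locationUri are the two constructor arguments, description stays optional, and — as the empty validate() further down shows — none of the three is actually enforced as required at serialization time.

    import org.apache.hadoop.hive.metastore.api.Catalog;

    public class CatalogDemo {
      public static void main(String[] args) {
        Catalog cat = new Catalog("test_cat", "file:/tmp/test_cat");  // name, locationUri
        cat.setDescription("scratch catalog for tests");              // optional (field id 2)
        System.out.println(cat.isSetDescription());  // true
        System.out.println(cat);  // Catalog(name:test_cat, description:..., locationUri:file:/tmp/test_cat)
      }
    }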
+ */ + public Catalog(Catalog other) { + if (other.isSetName()) { + this.name = other.name; + } + if (other.isSetDescription()) { + this.description = other.description; + } + if (other.isSetLocationUri()) { + this.locationUri = other.locationUri; + } + } + + public Catalog deepCopy() { + return new Catalog(this); + } + + @Override + public void clear() { + this.name = null; + this.description = null; + this.locationUri = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public String getDescription() { + return this.description; + } + + public void setDescription(String description) { + this.description = description; + } + + public void unsetDescription() { + this.description = null; + } + + /** Returns true if field description is set (has been assigned a value) and false otherwise */ + public boolean isSetDescription() { + return this.description != null; + } + + public void setDescriptionIsSet(boolean value) { + if (!value) { + this.description = null; + } + } + + public String getLocationUri() { + return this.locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } + + public void unsetLocationUri() { + this.locationUri = null; + } + + /** Returns true if field locationUri is set (has been assigned a value) and false otherwise */ + public boolean isSetLocationUri() { + return this.locationUri != null; + } + + public void setLocationUriIsSet(boolean value) { + if (!value) { + this.locationUri = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case DESCRIPTION: + if (value == null) { + unsetDescription(); + } else { + setDescription((String)value); + } + break; + + case LOCATION_URI: + if (value == null) { + unsetLocationUri(); + } else { + setLocationUri((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + case DESCRIPTION: + return getDescription(); + + case LOCATION_URI: + return getLocationUri(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + case DESCRIPTION: + return isSetDescription(); + case LOCATION_URI: + return isSetLocationUri(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof Catalog) + return this.equals((Catalog)that); + return false; + } + + public boolean equals(Catalog that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_description = 
true && this.isSetDescription(); + boolean that_present_description = true && that.isSetDescription(); + if (this_present_description || that_present_description) { + if (!(this_present_description && that_present_description)) + return false; + if (!this.description.equals(that.description)) + return false; + } + + boolean this_present_locationUri = true && this.isSetLocationUri(); + boolean that_present_locationUri = true && that.isSetLocationUri(); + if (this_present_locationUri || that_present_locationUri) { + if (!(this_present_locationUri && that_present_locationUri)) + return false; + if (!this.locationUri.equals(that.locationUri)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + boolean present_description = true && (isSetDescription()); + list.add(present_description); + if (present_description) + list.add(description); + + boolean present_locationUri = true && (isSetLocationUri()); + list.add(present_locationUri); + if (present_locationUri) + list.add(locationUri); + + return list.hashCode(); + } + + @Override + public int compareTo(Catalog other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDescription()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetLocationUri()).compareTo(other.isSetLocationUri()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLocationUri()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.locationUri, other.locationUri); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Catalog("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (isSetDescription()) { + if (!first) sb.append(", "); + sb.append("description:"); + if (this.description == null) { + sb.append("null"); + } else { + sb.append(this.description); + } + first = false; + } + if (!first) sb.append(", "); + sb.append("locationUri:"); + if (this.locationUri == null) { + sb.append("null"); + } else { + sb.append(this.locationUri); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws 
org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class CatalogStandardSchemeFactory implements SchemeFactory { + public CatalogStandardScheme getScheme() { + return new CatalogStandardScheme(); + } + } + + private static class CatalogStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, Catalog struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DESCRIPTION + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.description = iprot.readString(); + struct.setDescriptionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // LOCATION_URI + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.locationUri = iprot.readString(); + struct.setLocationUriIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, Catalog struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + if (struct.description != null) { + if (struct.isSetDescription()) { + oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC); + oprot.writeString(struct.description); + oprot.writeFieldEnd(); + } + } + if (struct.locationUri != null) { + oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC); + oprot.writeString(struct.locationUri); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class CatalogTupleSchemeFactory implements SchemeFactory { + public CatalogTupleScheme getScheme() { + return new CatalogTupleScheme(); + } + } + + private static class CatalogTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + if (struct.isSetDescription()) { + 
optionals.set(1); + } + if (struct.isSetLocationUri()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + if (struct.isSetDescription()) { + oprot.writeString(struct.description); + } + if (struct.isSetLocationUri()) { + oprot.writeString(struct.locationUri); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + if (incoming.get(1)) { + struct.description = iprot.readString(); + struct.setDescriptionIsSet(true); + } + if (incoming.get(2)) { + struct.locationUri = iprot.readString(); + struct.setLocationUriIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java index 2a8d81a18b..2ba0407f70 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CheckConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new CheckConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
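Worth flagging in CheckConstraintsRequest: unlike the optional catName on most structs, here catName is REQUIRED and takes field id 1, pushing db_name and tbl_name to ids 2 and 3 — so on the wire an old client's db_name (id 1) lands where a new server expects catName. Callers must now supply all three values; a short sketch using the generated three-argument constructor (the catalog name "hive" is illustrative):

    import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;

    public class CheckConstraintsDemo {
      public static void main(String[] args) throws Exception {
        // All three fields are required; validate() rejects a missing catName.
        CheckConstraintsRequest req = new CheckConstraintsRequest("hive", "db1", "tbl1");
        req.validate();  // passes

        CheckConstraintsRequest bad = new CheckConstraintsRequest();
        bad.setDb_name("db1");
        bad.setTbl_name("tbl1");
        bad.validate();  // throws TProtocolException: Required field 'catName' is unset!
      }
    }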
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public CheckConstraintsRequest() { } public CheckConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public CheckConstraintsRequest( * Performs a deep copy on other. */ public CheckConstraintsRequest(CheckConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public CheckConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(CheckConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && 
that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(CheckConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(CheckConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("CheckConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! 
Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsReq break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsRe struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public CheckConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsReq @Override public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java index a0a4422914..8d4f7be8fa 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsRes case 1: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list352 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list352.size); - SQLCheckConstraint _elem353; - for (int _i354 = 0; _i354 < _list352.size; ++_i354) + org.apache.thrift.protocol.TList _list360 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list360.size); + SQLCheckConstraint _elem361; + for (int _i362 = 0; _i362 < _list360.size; ++_i362) { - _elem353 = new SQLCheckConstraint(); - _elem353.read(iprot); - struct.checkConstraints.add(_elem353); + _elem361 = new SQLCheckConstraint(); + _elem361.read(iprot); + 
struct.checkConstraints.add(_elem361); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsRe oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter355 : struct.checkConstraints) + for (SQLCheckConstraint _iter363 : struct.checkConstraints) { - _iter355.write(oprot); + _iter363.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter356 : struct.checkConstraints) + for (SQLCheckConstraint _iter364 : struct.checkConstraints) { - _iter356.write(oprot); + _iter364.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRes public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.checkConstraints = new ArrayList(_list357.size); - SQLCheckConstraint _elem358; - for (int _i359 = 0; _i359 < _list357.size; ++_i359) + org.apache.thrift.protocol.TList _list365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list365.size); + SQLCheckConstraint _elem366; + for (int _i367 = 0; _i367 < _list365.size; ++_i367) { - _elem358 = new SQLCheckConstraint(); - _elem358.read(iprot); - struct.checkConstraints.add(_elem358); + _elem366 = new SQLCheckConstraint(); + _elem366.read(iprot); + struct.checkConstraints.add(_elem366); } } struct.setCheckConstraintsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index c4c1835573..b4bf2ce253 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list768 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list768.size); - long _elem769; - for (int _i770 = 0; _i770 < _list768.size; ++_i770) + org.apache.thrift.protocol.TList _list776 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list776.size); + long _elem777; + for (int _i778 = 0; _i778 < _list776.size; ++_i778) { - _elem769 = iprot.readI64(); - struct.fileIds.add(_elem769); + _elem777 = iprot.readI64(); + struct.fileIds.add(_elem777); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for 
(long _iter771 : struct.fileIds) + for (long _iter779 : struct.fileIds) { - oprot.writeI64(_iter771); + oprot.writeI64(_iter779); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter772 : struct.fileIds) + for (long _iter780 : struct.fileIds) { - oprot.writeI64(_iter772); + oprot.writeI64(_iter780); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list773 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list773.size); - long _elem774; - for (int _i775 = 0; _i775 < _list773.size; ++_i775) + org.apache.thrift.protocol.TList _list781 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list781.size); + long _elem782; + for (int _i783 = 0; _i783 < _list781.size; ++_i783) { - _elem774 = iprot.readI64(); - struct.fileIds.add(_elem774); + _elem782 = iprot.readI64(); + struct.fileIds.add(_elem782); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java index 3085522cbe..a214a870cc 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java @@ -354,13 +354,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list784 = iprot.readListBegin(); - struct.values = new ArrayList(_list784.size); - ClientCapability _elem785; - for (int _i786 = 0; _i786 < _list784.size; ++_i786) + org.apache.thrift.protocol.TList _list792 = iprot.readListBegin(); + struct.values = new ArrayList(_list792.size); + ClientCapability _elem793; + for (int _i794 = 0; _i794 < _list792.size; ++_i794) { - _elem785 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem785); + _elem793 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem793); } iprot.readListEnd(); } @@ -386,9 +386,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (ClientCapability _iter787 : struct.values) + for (ClientCapability _iter795 : struct.values) { - oprot.writeI32(_iter787.getValue()); + oprot.writeI32(_iter795.getValue()); } oprot.writeListEnd(); } @@ -413,9 +413,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.values.size()); - for (ClientCapability _iter788 : struct.values) + for (ClientCapability _iter796 : 
struct.values) { - oprot.writeI32(_iter788.getValue()); + oprot.writeI32(_iter796.getValue()); } } } @@ -424,13 +424,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list789 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list789.size); - ClientCapability _elem790; - for (int _i791 = 0; _i791 < _list789.size; ++_i791) + org.apache.thrift.protocol.TList _list797 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.values = new ArrayList(_list797.size); + ClientCapability _elem798; + for (int _i799 = 0; _i799 < _list797.size; ++_i799) { - _elem790 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); - struct.values.add(_elem790); + _elem798 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32()); + struct.values.add(_elem798); } } struct.setValuesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index 9eb4652a41..6ce7214c9d 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -451,14 +451,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st case 2: // STATS_OBJ if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list260 = iprot.readListBegin(); - struct.statsObj = new ArrayList(_list260.size); - ColumnStatisticsObj _elem261; - for (int _i262 = 0; _i262 < _list260.size; ++_i262) + org.apache.thrift.protocol.TList _list268 = iprot.readListBegin(); + struct.statsObj = new ArrayList(_list268.size); + ColumnStatisticsObj _elem269; + for (int _i270 = 0; _i270 < _list268.size; ++_i270) { - _elem261 = new ColumnStatisticsObj(); - _elem261.read(iprot); - struct.statsObj.add(_elem261); + _elem269 = new ColumnStatisticsObj(); + _elem269.read(iprot); + struct.statsObj.add(_elem269); } iprot.readListEnd(); } @@ -489,9 +489,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.statsObj.size())); - for (ColumnStatisticsObj _iter263 : struct.statsObj) + for (ColumnStatisticsObj _iter271 : struct.statsObj) { - _iter263.write(oprot); + _iter271.write(oprot); } oprot.writeListEnd(); } @@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st struct.statsDesc.write(oprot); { oprot.writeI32(struct.statsObj.size()); - for (ColumnStatisticsObj _iter264 : struct.statsObj) + for (ColumnStatisticsObj _iter272 : struct.statsObj) { - _iter264.write(oprot); + _iter272.write(oprot); } } } @@ -531,14 +531,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str struct.statsDesc.read(iprot); struct.setStatsDescIsSet(true); { - org.apache.thrift.protocol.TList _list265 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.statsObj = new ArrayList(_list265.size); - ColumnStatisticsObj _elem266; - for (int _i267 = 0; _i267 < _list265.size; ++_i267) + org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.statsObj = new ArrayList(_list273.size); + ColumnStatisticsObj _elem274; + for (int _i275 = 0; _i275 < _list273.size; ++_i275) { - _elem266 = new ColumnStatisticsObj(); - _elem266.read(iprot); - struct.statsObj.add(_elem266); + _elem274 = new ColumnStatisticsObj(); + _elem274.read(iprot); + struct.statsObj.add(_elem274); } } struct.setStatsObjIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java index 922094b5eb..0e70758786 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField LAST_ANALYZED_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAnalyzed", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String tableName; // required private String partName; // optional private long lastAnalyzed; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ DB_NAME((short)2, "dbName"), TABLE_NAME((short)3, "tableName"), PART_NAME((short)4, "partName"), - LAST_ANALYZED((short)5, "lastAnalyzed"); + LAST_ANALYZED((short)5, "lastAnalyzed"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 5: // LAST_ANALYZED return LAST_ANALYZED; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -130,7 +135,7 @@ public String getFieldName() { private static final int __ISTBLLEVEL_ISSET_ID = 0; private static final int __LASTANALYZED_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.LAST_ANALYZED}; + private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.LAST_ANALYZED,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.LAST_ANALYZED, new org.apache.thrift.meta_data.FieldMetaData("lastAnalyzed", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsDesc.class, metaDataMap); } @@ -179,6 +186,9 @@ public ColumnStatisticsDesc(ColumnStatisticsDesc other) { this.partName = other.partName; } this.lastAnalyzed = other.lastAnalyzed; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ColumnStatisticsDesc deepCopy() { @@ -194,6 +204,7 @@ public void clear() { this.partName = null; setLastAnalyzedIsSet(false); this.lastAnalyzed = 0; + this.catName = null; } public boolean isIsTblLevel() { @@ -309,6 +320,29 @@ public void setLastAnalyzedIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTANALYZED_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case IS_TBL_LEVEL: @@ -351,6 +385,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -371,6 +413,9 @@ public Object getFieldValue(_Fields field) { case LAST_ANALYZED: return getLastAnalyzed(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -392,6 +437,8 @@ public boolean isSet(_Fields field) 
{ return isSetPartName(); case LAST_ANALYZED: return isSetLastAnalyzed(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -454,6 +501,15 @@ public boolean equals(ColumnStatisticsDesc that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -486,6 +542,11 @@ public int hashCode() { if (present_lastAnalyzed) list.add(lastAnalyzed); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -547,6 +608,16 @@ public int compareTo(ColumnStatisticsDesc other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -602,6 +673,16 @@ public String toString() { sb.append(this.lastAnalyzed); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -699,6 +780,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatisticsDes org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -737,6 +826,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatisticsDe oprot.writeI64(struct.lastAnalyzed); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -764,13 +860,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDes if (struct.isSetLastAnalyzed()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetPartName()) { oprot.writeString(struct.partName); } if (struct.isSetLastAnalyzed()) { oprot.writeI64(struct.lastAnalyzed); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -782,7 +884,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.partName = iprot.readString(); struct.setPartNameIsSet(true); @@ -791,6 +893,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct.lastAnalyzed = iprot.readI64(); struct.setLastAnalyzedIsSet(true); 
} + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java index 1a27ff5fcf..a106cd4c37 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java @@ -814,15 +814,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest s case 6: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map650 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map650.size); - String _key651; - String _val652; - for (int _i653 = 0; _i653 < _map650.size; ++_i653) + org.apache.thrift.protocol.TMap _map658 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map658.size); + String _key659; + String _val660; + for (int _i661 = 0; _i661 < _map658.size; ++_i661) { - _key651 = iprot.readString(); - _val652 = iprot.readString(); - struct.properties.put(_key651, _val652); + _key659 = iprot.readString(); + _val660 = iprot.readString(); + struct.properties.put(_key659, _val660); } iprot.readMapEnd(); } @@ -878,10 +878,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter654 : struct.properties.entrySet()) + for (Map.Entry _iter662 : struct.properties.entrySet()) { - oprot.writeString(_iter654.getKey()); - oprot.writeString(_iter654.getValue()); + oprot.writeString(_iter662.getKey()); + oprot.writeString(_iter662.getValue()); } oprot.writeMapEnd(); } @@ -928,10 +928,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest s if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter655 : struct.properties.entrySet()) + for (Map.Entry _iter663 : struct.properties.entrySet()) { - oprot.writeString(_iter655.getKey()); - oprot.writeString(_iter655.getValue()); + oprot.writeString(_iter663.getKey()); + oprot.writeString(_iter663.getValue()); } } } @@ -957,15 +957,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest st } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map656 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map656.size); - String _key657; - String _val658; - for (int _i659 = 0; _i659 < _map656.size; ++_i659) + org.apache.thrift.protocol.TMap _map664 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map664.size); + String _key665; + String _val666; + for (int _i667 = 0; _i667 < _map664.size; ++_i667) { - _key657 = iprot.readString(); - _val658 = iprot.readString(); - struct.properties.put(_key657, _val658); + _key665 = iprot.readString(); + _val666 = iprot.readString(); + struct.properties.put(_key665, _val666); } } struct.setPropertiesIsSet(true); 
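
The file that follows is new in this patch: CreateCatalogRequest wraps a single Catalog struct (field 1), presumably so the corresponding create-catalog service call can grow additional parameters later without changing its signature. Below is a minimal round-trip sketch of the generated bean, using only libthrift's in-memory transport plus classes from this patch; Catalog's setName and setLocationUri setters and the sample values are assumptions based on the usual generated-bean pattern and do not appear in these hunks.

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.CreateCatalogRequest;

public class CreateCatalogRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // Assumed setters: generated Thrift beans expose setX for each field x.
    Catalog cat = new Catalog();
    cat.setName("test_cat");                    // hypothetical catalog name
    cat.setLocationUri("file:///tmp/test_cat"); // hypothetical location

    // The generated constructor simply stores the Catalog struct.
    CreateCatalogRequest out = new CreateCatalogRequest(cat);

    // TBinaryProtocol selects the StandardScheme, which validates the
    // request and writes field 1 (CATALOG) as a nested struct.
    TMemoryBuffer buf = new TMemoryBuffer(512);
    out.write(new TBinaryProtocol(buf));

    // Read it back into a fresh object; the generated equals() compares
    // the nested Catalog field.
    CreateCatalogRequest in = new CreateCatalogRequest();
    in.read(new TBinaryProtocol(buf));
    System.out.println("round-trip equal: " + out.equals(in));
  }
}

The same catalog-awareness pattern repeats across the existing structs touched in this patch: where catName is required (CreationMetadata, DefaultConstraintsRequest) the tuple scheme writes and reads it unconditionally, and where it is optional (ColumnStatisticsDesc, Database) the tuple scheme instead widens the optionals BitSet by one bit, e.g. readBitSet(2) becoming readBitSet(3).
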
diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java new file mode 100644 index 0000000000..c260b3d35e --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java @@ -0,0 +1,400 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreateCatalogRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreateCatalogRequest"); + + private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new CreateCatalogRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new CreateCatalogRequestTupleSchemeFactory()); + } + + private Catalog catalog; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CATALOG((short)1, "catalog"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CATALOG + return CATALOG; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreateCatalogRequest.class, metaDataMap); + } + + public CreateCatalogRequest() { + } + + public CreateCatalogRequest( + Catalog catalog) + { + this(); + this.catalog = catalog; + } + + /** + * Performs a deep copy on other. + */ + public CreateCatalogRequest(CreateCatalogRequest other) { + if (other.isSetCatalog()) { + this.catalog = new Catalog(other.catalog); + } + } + + public CreateCatalogRequest deepCopy() { + return new CreateCatalogRequest(this); + } + + @Override + public void clear() { + this.catalog = null; + } + + public Catalog getCatalog() { + return this.catalog; + } + + public void setCatalog(Catalog catalog) { + this.catalog = catalog; + } + + public void unsetCatalog() { + this.catalog = null; + } + + /** Returns true if field catalog is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalog() { + return this.catalog != null; + } + + public void setCatalogIsSet(boolean value) { + if (!value) { + this.catalog = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CATALOG: + if (value == null) { + unsetCatalog(); + } else { + setCatalog((Catalog)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CATALOG: + return getCatalog(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CATALOG: + return isSetCatalog(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof CreateCatalogRequest) + return this.equals((CreateCatalogRequest)that); + return false; + } + + public boolean equals(CreateCatalogRequest that) { + if (that == null) + return false; + + boolean this_present_catalog = true && this.isSetCatalog(); + boolean that_present_catalog = true && that.isSetCatalog(); + if (this_present_catalog || that_present_catalog) { + if (!(this_present_catalog && that_present_catalog)) + return false; + if 
(!this.catalog.equals(that.catalog)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catalog = true && (isSetCatalog()); + list.add(present_catalog); + if (present_catalog) + list.add(catalog); + + return list.hashCode(); + } + + @Override + public int compareTo(CreateCatalogRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalog()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("CreateCatalogRequest("); + boolean first = true; + + sb.append("catalog:"); + if (this.catalog == null) { + sb.append("null"); + } else { + sb.append(this.catalog); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catalog != null) { + catalog.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class CreateCatalogRequestStandardSchemeFactory implements SchemeFactory { + public CreateCatalogRequestStandardScheme getScheme() { + return new CreateCatalogRequestStandardScheme(); + } + } + + private static class CreateCatalogRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, CreateCatalogRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CATALOG + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public 
void write(org.apache.thrift.protocol.TProtocol oprot, CreateCatalogRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catalog != null) { + oprot.writeFieldBegin(CATALOG_FIELD_DESC); + struct.catalog.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class CreateCatalogRequestTupleSchemeFactory implements SchemeFactory { + public CreateCatalogRequestTupleScheme getScheme() { + return new CreateCatalogRequestTupleScheme(); + } + } + + private static class CreateCatalogRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, CreateCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatalog()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatalog()) { + struct.catalog.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, CreateCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index b744177383..d28972c734 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -38,10 +38,11 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreationMetadata implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreationMetadata"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)3); - private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, 
(short)3); + private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,6 +50,7 @@ schemes.put(TupleScheme.class, new CreationMetadataTupleSchemeFactory()); } + private String catName; // required private String dbName; // required private String tblName; // required private Set tablesUsed; // required @@ -56,10 +58,11 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "dbName"), - TBL_NAME((short)2, "tblName"), - TABLES_USED((short)3, "tablesUsed"), - VALID_TXN_LIST((short)4, "validTxnList"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "dbName"), + TBL_NAME((short)3, "tblName"), + TABLES_USED((short)4, "tablesUsed"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -74,13 +77,15 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; - case 3: // TABLES_USED + case 4: // TABLES_USED return TABLES_USED; - case 4: // VALID_TXN_LIST + case 5: // VALID_TXN_LIST return VALID_TXN_LIST; default: return null; @@ -126,6 +131,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -143,11 +150,13 @@ public CreationMetadata() { } public CreationMetadata( + String catName, String dbName, String tblName, Set tablesUsed) { this(); + this.catName = catName; this.dbName = dbName; this.tblName = tblName; this.tablesUsed = tablesUsed; @@ -157,6 +166,9 @@ public CreationMetadata( * Performs a deep copy on other. 
*/ public CreationMetadata(CreationMetadata other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -178,12 +190,36 @@ public CreationMetadata deepCopy() { @Override public void clear() { + this.catName = null; this.dbName = null; this.tblName = null; this.tablesUsed = null; this.validTxnList = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -293,6 +329,14 @@ public void setValidTxnListIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -330,6 +374,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -353,6 +400,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case TBL_NAME: @@ -378,6 +427,15 @@ public boolean equals(CreationMetadata that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -421,6 +479,11 @@ public boolean equals(CreationMetadata that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -452,6 +515,16 @@ public int compareTo(CreationMetadata other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -512,6 +585,14 @@ public String toString() { StringBuilder sb = new StringBuilder("CreationMetadata("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -551,6 +632,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields + if 
(!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDbName()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); } @@ -600,7 +685,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -608,7 +701,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); @@ -616,16 +709,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // TABLES_USED + case 4: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set676 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set676.size); - String _elem677; - for (int _i678 = 0; _i678 < _set676.size; ++_i678) + org.apache.thrift.protocol.TSet _set684 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set684.size); + String _elem685; + for (int _i686 = 0; _i686 < _set684.size; ++_i686) { - _elem677 = iprot.readString(); - struct.tablesUsed.add(_elem677); + _elem685 = iprot.readString(); + struct.tablesUsed.add(_elem685); } iprot.readSetEnd(); } @@ -634,7 +727,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // VALID_TXN_LIST + case 5: // VALID_TXN_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validTxnList = iprot.readString(); struct.setValidTxnListIsSet(true); @@ -655,6 +748,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -669,9 +767,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter679 : struct.tablesUsed) + for (String _iter687 : struct.tablesUsed) { - oprot.writeString(_iter679); + oprot.writeString(_iter687); } oprot.writeSetEnd(); } @@ -701,13 +799,14 @@ public CreationMetadataTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + 
oprot.writeString(struct.catName); oprot.writeString(struct.dbName); oprot.writeString(struct.tblName); { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter680 : struct.tablesUsed) + for (String _iter688 : struct.tablesUsed) { - oprot.writeString(_iter680); + oprot.writeString(_iter688); } } BitSet optionals = new BitSet(); @@ -723,18 +822,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st @Override public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TSet _set681 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set681.size); - String _elem682; - for (int _i683 = 0; _i683 < _set681.size; ++_i683) + org.apache.thrift.protocol.TSet _set689 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set689.size); + String _elem690; + for (int _i691 = 0; _i691 < _set689.size; ++_i691) { - _elem682 = iprot.readString(); - struct.tablesUsed.add(_elem682); + _elem690 = iprot.readString(); + struct.tablesUsed.add(_elem690); } } struct.setTablesUsedIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 1a8c7b5e49..9cde9b8699 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -45,6 +45,7 @@ private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField OWNER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)7); + private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -59,6 +60,7 @@ private PrincipalPrivilegeSet privileges; // optional private String ownerName; // optional private PrincipalType ownerType; // optional + private String catalogName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -72,7 +74,8 @@ * * @see PrincipalType */ - OWNER_TYPE((short)7, "ownerType"); + OWNER_TYPE((short)7, "ownerType"), + CATALOG_NAME((short)8, "catalogName"); private static final Map byName = new HashMap(); @@ -101,6 +104,8 @@ public static _Fields findByThriftId(int fieldId) { return OWNER_NAME; case 7: // OWNER_TYPE return OWNER_TYPE; + case 8: // CATALOG_NAME + return CATALOG_NAME; default: return null; } @@ -141,7 +146,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE,_Fields.CATALOG_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -161,6 +166,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class))); + tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Database.class, metaDataMap); } @@ -207,6 +214,9 @@ public Database(Database other) { if (other.isSetOwnerType()) { this.ownerType = other.ownerType; } + if (other.isSetCatalogName()) { + this.catalogName = other.catalogName; + } } public Database deepCopy() { @@ -222,6 +232,7 @@ public void clear() { this.privileges = null; this.ownerName = null; this.ownerType = null; + this.catalogName = null; } public String getName() { @@ -404,6 +415,29 @@ public void setOwnerTypeIsSet(boolean value) { } } + public String getCatalogName() { + return this.catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } + + public void unsetCatalogName() { + this.catalogName = null; + } + + /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalogName() { + return this.catalogName != null; + } + + public void setCatalogNameIsSet(boolean value) { + if (!value) { + this.catalogName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NAME: @@ -462,6 +496,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CATALOG_NAME: + if (value == null) { + unsetCatalogName(); + } else { + setCatalogName((String)value); + } + break; + } } @@ -488,6 +530,9 @@ public Object getFieldValue(_Fields field) { case OWNER_TYPE: return getOwnerType(); + case CATALOG_NAME: + return getCatalogName(); + } throw new IllegalStateException(); } @@ -513,6 +558,8 @@ public boolean isSet(_Fields field) { return isSetOwnerName(); case OWNER_TYPE: return isSetOwnerType(); + case CATALOG_NAME: + return isSetCatalogName(); } throw new IllegalStateException(); } @@ -593,6 +640,15 @@ public boolean equals(Database 
that) { return false; } + boolean this_present_catalogName = true && this.isSetCatalogName(); + boolean that_present_catalogName = true && that.isSetCatalogName(); + if (this_present_catalogName || that_present_catalogName) { + if (!(this_present_catalogName && that_present_catalogName)) + return false; + if (!this.catalogName.equals(that.catalogName)) + return false; + } + return true; } @@ -635,6 +691,11 @@ public int hashCode() { if (present_ownerType) list.add(ownerType.getValue()); + boolean present_catalogName = true && (isSetCatalogName()); + list.add(present_catalogName); + if (present_catalogName) + list.add(catalogName); + return list.hashCode(); } @@ -716,6 +777,16 @@ public int compareTo(Database other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalogName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -797,6 +868,16 @@ public String toString() { } first = false; } + if (isSetCatalogName()) { + if (!first) sb.append(", "); + sb.append("catalogName:"); + if (this.catalogName == null) { + sb.append("null"); + } else { + sb.append(this.catalogName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -870,15 +951,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Database struct) th case 4: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map94.size); - String _key95; - String _val96; - for (int _i97 = 0; _i97 < _map94.size; ++_i97) + org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map102.size); + String _key103; + String _val104; + for (int _i105 = 0; _i105 < _map102.size; ++_i105) { - _key95 = iprot.readString(); - _val96 = iprot.readString(); - struct.parameters.put(_key95, _val96); + _key103 = iprot.readString(); + _val104 = iprot.readString(); + struct.parameters.put(_key103, _val104); } iprot.readMapEnd(); } @@ -912,6 +993,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Database struct) th org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CATALOG_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -944,10 +1033,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Database struct) t oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter98 : struct.parameters.entrySet()) + for (Map.Entry _iter106 : struct.parameters.entrySet()) { - oprot.writeString(_iter98.getKey()); - oprot.writeString(_iter98.getValue()); + oprot.writeString(_iter106.getKey()); + oprot.writeString(_iter106.getValue()); } oprot.writeMapEnd(); } @@ -974,6 +1063,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Database struct) t oprot.writeFieldEnd(); } } + if 
(struct.catalogName != null) { + if (struct.isSetCatalogName()) { + oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); + oprot.writeString(struct.catalogName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1013,7 +1109,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th if (struct.isSetOwnerType()) { optionals.set(6); } - oprot.writeBitSet(optionals, 7); + if (struct.isSetCatalogName()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); if (struct.isSetName()) { oprot.writeString(struct.name); } @@ -1026,10 +1125,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter99 : struct.parameters.entrySet()) + for (Map.Entry _iter107 : struct.parameters.entrySet()) { - oprot.writeString(_iter99.getKey()); - oprot.writeString(_iter99.getValue()); + oprot.writeString(_iter107.getKey()); + oprot.writeString(_iter107.getValue()); } } } @@ -1042,12 +1141,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th if (struct.isSetOwnerType()) { oprot.writeI32(struct.ownerType.getValue()); } + if (struct.isSetCatalogName()) { + oprot.writeString(struct.catalogName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(7); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { struct.name = iprot.readString(); struct.setNameIsSet(true); @@ -1062,15 +1164,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) thr } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map100 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map100.size); - String _key101; - String _val102; - for (int _i103 = 0; _i103 < _map100.size; ++_i103) + org.apache.thrift.protocol.TMap _map108 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map108.size); + String _key109; + String _val110; + for (int _i111 = 0; _i111 < _map108.size; ++_i111) { - _key101 = iprot.readString(); - _val102 = iprot.readString(); - struct.parameters.put(_key101, _val102); + _key109 = iprot.readString(); + _val110 = iprot.readString(); + struct.parameters.put(_key109, _val110); } } struct.setParametersIsSet(true); @@ -1088,6 +1190,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) thr struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32()); struct.setOwnerTypeIsSet(true); } + if (incoming.get(7)) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java index 5f4954d2a7..69378c916f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java @@ 
-38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new DefaultConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public DefaultConstraintsRequest() { } public DefaultConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public DefaultConstraintsRequest( * Performs a deep copy on other. 
*/ public DefaultConstraintsRequest(DefaultConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public DefaultConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(DefaultConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(DefaultConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(DefaultConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("DefaultConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // 
check for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraints struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public DefaultConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR @Override public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java index f7ee187d6e..47b8d1cef0 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR case 1: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list344 = iprot.readListBegin(); - struct.defaultConstraints = new ArrayList(_list344.size); - SQLDefaultConstraint _elem345; - for (int _i346 = 0; _i346 < _list344.size; ++_i346) + org.apache.thrift.protocol.TList _list352 = iprot.readListBegin(); + struct.defaultConstraints = new 
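Reviewer note on the DefaultConstraintsRequest hunks above: catName is inserted at field id 1 and the regenerated validate() treats it as required, while db_name and tbl_name shift to ids 2 and 3. Renumbering changes the wire ids themselves, which is presumably tolerable only because this request type has not yet shipped in a release. A minimal sketch of the new caller obligation ("default" and "orders" are placeholder names; Warehouse.DEFAULT_CATALOG_NAME is the metastore's default-catalog constant):

    import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    public class DefaultConstraintsExample {
      public static void main(String[] args) throws org.apache.thrift.TException {
        DefaultConstraintsRequest req = new DefaultConstraintsRequest();
        // catName is now required: the regenerated validate() throws
        // TProtocolException("Required field 'catName' is unset!") without it.
        req.setCatName(DEFAULT_CATALOG_NAME);
        req.setDb_name("default");   // placeholder database
        req.setTbl_name("orders");   // placeholder table
        req.validate();              // passes only with all three fields set
      }
    }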
ArrayList(_list352.size); + SQLDefaultConstraint _elem353; + for (int _i354 = 0; _i354 < _list352.size; ++_i354) { - _elem345 = new SQLDefaultConstraint(); - _elem345.read(iprot); - struct.defaultConstraints.add(_elem345); + _elem353 = new SQLDefaultConstraint(); + _elem353.read(iprot); + struct.defaultConstraints.add(_elem353); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraints oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter347 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter355 : struct.defaultConstraints) { - _iter347.write(oprot); + _iter355.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter348 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter356 : struct.defaultConstraints) { - _iter348.write(oprot); + _iter356.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list349.size); - SQLDefaultConstraint _elem350; - for (int _i351 = 0; _i351 < _list349.size; ++_i351) + org.apache.thrift.protocol.TList _list357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list357.size); + SQLDefaultConstraint _elem358; + for (int _i359 = 0; _i359 < _list357.size; ++_i359) { - _elem350 = new SQLDefaultConstraint(); - _elem350.read(iprot); - struct.defaultConstraints.add(_elem350); + _elem358 = new SQLDefaultConstraint(); + _elem358.read(iprot); + struct.defaultConstraints.add(_elem358); } } struct.setDefaultConstraintsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java new file mode 100644 index 0000000000..a11fe47ab6 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java @@ -0,0 +1,395 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; 
+import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropCatalogRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropCatalogRequest"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new DropCatalogRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new DropCatalogRequestTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropCatalogRequest.class, metaDataMap); + } + + public DropCatalogRequest() { + } + + public DropCatalogRequest( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. + */ + public DropCatalogRequest(DropCatalogRequest other) { + if (other.isSetName()) { + this.name = other.name; + } + } + + public DropCatalogRequest deepCopy() { + return new DropCatalogRequest(this); + } + + @Override + public void clear() { + this.name = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof DropCatalogRequest) + return this.equals((DropCatalogRequest)that); + return false; + } + + public boolean equals(DropCatalogRequest that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + return list.hashCode(); + } + + @Override + public int compareTo(DropCatalogRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } 
+ + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("DropCatalogRequest("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class DropCatalogRequestStandardSchemeFactory implements SchemeFactory { + public DropCatalogRequestStandardScheme getScheme() { + return new DropCatalogRequestStandardScheme(); + } + } + + private static class DropCatalogRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, DropCatalogRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, DropCatalogRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class DropCatalogRequestTupleSchemeFactory implements SchemeFactory { + public DropCatalogRequestTupleScheme getScheme() { + return new DropCatalogRequestTupleScheme(); + } + } + + private static class 
DropCatalogRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, DropCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, DropCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java index 98f1531b23..a9c58929b8 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CONSTRAINTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("constraintname", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbname; // required private String tablename; // required private String constraintname; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
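DropCatalogRequest, ending above, is a new fully generated single-field struct, so its behavior is easiest to confirm with a serialization round trip. A self-contained sketch (the catalog name is a placeholder; TCompactProtocol over an in-memory stream mirrors what the struct's own writeObject/readObject hooks do):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.hive.metastore.api.DropCatalogRequest;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    public class DropCatalogRoundTrip {
      public static void main(String[] args) throws Exception {
        DropCatalogRequest out = new DropCatalogRequest("my_test_catalog"); // placeholder name

        // Serialize to an in-memory buffer with the compact protocol.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        out.write(new TCompactProtocol(new TIOStreamTransport(buf)));

        // Deserialize into a fresh instance and compare via the generated equals().
        DropCatalogRequest in = new DropCatalogRequest();
        in.read(new TCompactProtocol(
            new TIOStreamTransport(new ByteArrayInputStream(buf.toByteArray()))));
        System.out.println(out.equals(in)); // expected: true
      }
    }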
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TABLENAME((short)2, "tablename"), - CONSTRAINTNAME((short)3, "constraintname"); + CONSTRAINTNAME((short)3, "constraintname"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLENAME; case 3: // CONSTRAINTNAME return CONSTRAINTNAME; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,6 +122,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -126,6 +132,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.CONSTRAINTNAME, new org.apache.thrift.meta_data.FieldMetaData("constraintname", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropConstraintRequest.class, metaDataMap); } @@ -157,6 +165,9 @@ public DropConstraintRequest(DropConstraintRequest other) { if (other.isSetConstraintname()) { this.constraintname = other.constraintname; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public DropConstraintRequest deepCopy() { @@ -168,6 +179,7 @@ public void clear() { this.dbname = null; this.tablename = null; this.constraintname = null; + this.catName = null; } public String getDbname() { @@ -239,6 +251,29 @@ public void setConstraintnameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -265,6 +300,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -279,6 +322,9 @@ public Object getFieldValue(_Fields field) { case CONSTRAINTNAME: return getConstraintname(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -296,6 +342,8 @@ public boolean isSet(_Fields field) { return isSetTablename(); case CONSTRAINTNAME: return isSetConstraintname(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -340,6 +388,15 @@ public boolean equals(DropConstraintRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && 
that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -362,6 +419,11 @@ public int hashCode() { if (present_constraintname) list.add(constraintname); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -403,6 +465,16 @@ public int compareTo(DropConstraintRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -446,6 +518,16 @@ public String toString() { sb.append(this.constraintname); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -525,6 +607,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropConstraintReque org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -553,6 +643,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropConstraintRequ oprot.writeString(struct.constraintname); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -573,6 +670,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropConstraintReque oprot.writeString(struct.dbname); oprot.writeString(struct.tablename); oprot.writeString(struct.constraintname); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -584,6 +689,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropConstraintReques struct.setTablenameIsSet(true); struct.constraintname = iprot.readString(); struct.setConstraintnameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java index 6927c781c5..443f08e277 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField 
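For DropConstraintRequest, and for the remaining request structs in this patch, catName is appended as an optional field: the standard scheme writes it only when isSetCatName() is true, and the tuple scheme carries it behind an extra BitSet flag, so payloads from clients that predate the field still deserialize. A sketch of the defaulting idiom a server can apply on receipt (resolveCatalog is a hypothetical helper; "sales", "orders", "fk_customer" and "spark_cat" are placeholders):

    import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    public class CatNameDefaulting {
      // Hypothetical server-side helper: resolve the catalog for an incoming
      // request, falling back to the default when an older client left it unset.
      static String resolveCatalog(DropConstraintRequest req) {
        return req.isSetCatName() ? req.getCatName() : DEFAULT_CATALOG_NAME;
      }

      public static void main(String[] args) {
        DropConstraintRequest req =
            new DropConstraintRequest("sales", "orders", "fk_customer");
        System.out.println(resolveCatalog(req)); // default catalog: field never set
        req.setCatName("spark_cat");             // placeholder non-default catalog
        System.out.println(resolveCatalog(req)); // spark_cat
      }
    }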
IGNORE_PROTECTION_FIELD_DESC = new org.apache.thrift.protocol.TField("ignoreProtection", org.apache.thrift.protocol.TType.BOOL, (short)6); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)7); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private boolean ignoreProtection; // optional private EnvironmentContext environmentContext; // optional private boolean needResult; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ IF_EXISTS((short)5, "ifExists"), IGNORE_PROTECTION((short)6, "ignoreProtection"), ENVIRONMENT_CONTEXT((short)7, "environmentContext"), - NEED_RESULT((short)8, "needResult"); + NEED_RESULT((short)8, "needResult"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return ENVIRONMENT_CONTEXT; case 8: // NEED_RESULT return NEED_RESULT; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -147,7 +152,7 @@ public String getFieldName() { private static final int __IGNOREPROTECTION_ISSET_ID = 2; private static final int __NEEDRESULT_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DELETE_DATA,_Fields.IF_EXISTS,_Fields.IGNORE_PROTECTION,_Fields.ENVIRONMENT_CONTEXT,_Fields.NEED_RESULT}; + private static final _Fields optionals[] = {_Fields.DELETE_DATA,_Fields.IF_EXISTS,_Fields.IGNORE_PROTECTION,_Fields.ENVIRONMENT_CONTEXT,_Fields.NEED_RESULT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -167,6 +172,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropPartitionsRequest.class, metaDataMap); } @@ -210,6 +217,9 @@ public DropPartitionsRequest(DropPartitionsRequest other) { this.environmentContext = new EnvironmentContext(other.environmentContext); } this.needResult = other.needResult; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public DropPartitionsRequest 
deepCopy() { @@ -230,6 +240,7 @@ public void clear() { this.environmentContext = null; this.needResult = true; + this.catName = null; } public String getDbName() { @@ -412,6 +423,29 @@ public void setNeedResultIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -478,6 +512,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -507,6 +549,9 @@ public Object getFieldValue(_Fields field) { case NEED_RESULT: return isNeedResult(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -534,6 +579,8 @@ public boolean isSet(_Fields field) { return isSetEnvironmentContext(); case NEED_RESULT: return isSetNeedResult(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -623,6 +670,15 @@ public boolean equals(DropPartitionsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -670,6 +726,11 @@ public int hashCode() { if (present_needResult) list.add(needResult); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -761,6 +822,16 @@ public int compareTo(DropPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -838,6 +909,16 @@ public String toString() { sb.append(this.needResult); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -964,6 +1045,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsReque org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1019,6 +1108,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsRequ 
oprot.writeBool(struct.needResult); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1055,7 +1151,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReque if (struct.isSetNeedResult()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDeleteData()) { oprot.writeBool(struct.deleteData); } @@ -1071,6 +1170,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReque if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1083,7 +1185,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReques struct.parts = new RequestPartsSpec(); struct.parts.read(iprot); struct.setPartsIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.deleteData = iprot.readBool(); struct.setDeleteDataIsSet(true); @@ -1105,6 +1207,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReques struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index e3f9161628..0f22168664 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list482 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list482.size); - Partition _elem483; - for (int _i484 = 0; _i484 < _list482.size; ++_i484) + org.apache.thrift.protocol.TList _list490 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list490.size); + Partition _elem491; + for (int _i492 = 0; _i492 < _list490.size; ++_i492) { - _elem483 = new Partition(); - _elem483.read(iprot); - struct.partitions.add(_elem483); + _elem491 = new Partition(); + _elem491.read(iprot); + struct.partitions.add(_elem491); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter485 : struct.partitions) + for (Partition _iter493 : struct.partitions) { - _iter485.write(oprot); + _iter493.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter486 : struct.partitions) + for (Partition _iter494 : 
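Note on the DropPartitionsRequest tuple scheme above: the optionals BitSet widens from 5 to 6 flags. Since TTupleProtocol packs the flags into whole bytes, both widths should occupy a single byte, so tuple payloads written before the change still read back with bit 5 simply clear. A construction sketch under that reading (table, partition and catalog names are placeholders; the three-argument constructor covers the struct's required dbName, tblName and parts fields):

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

    public class DropPartitionsWithCatalog {
      public static void main(String[] args) {
        // dbName, tblName and parts are the required fields of this struct.
        DropPartitionsRequest req = new DropPartitionsRequest(
            "default", "web_logs",
            RequestPartsSpec.names(Arrays.asList("ds=2018-03-26")));
        req.setDeleteData(true);
        req.setCatName("hive"); // new optional field id 9; omitted on the wire when unset
      }
    }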
struct.partitions) { - _iter486.write(oprot); + _iter494.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list487.size); - Partition _elem488; - for (int _i489 = 0; _i489 < _list487.size; ++_i489) + org.apache.thrift.protocol.TList _list495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list495.size); + Partition _elem496; + for (int _i497 = 0; _i497 < _list495.size; ++_i497) { - _elem488 = new Partition(); - _elem488.read(iprot); - struct.partitions.add(_elem488); + _elem496 = new Partition(); + _elem496.read(iprot); + struct.partitions.add(_elem496); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index e420b9e35c..52fae26213 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -344,15 +344,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext case 1: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map302 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map302.size); - String _key303; - String _val304; - for (int _i305 = 0; _i305 < _map302.size; ++_i305) + org.apache.thrift.protocol.TMap _map310 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map310.size); + String _key311; + String _val312; + for (int _i313 = 0; _i313 < _map310.size; ++_i313) { - _key303 = iprot.readString(); - _val304 = iprot.readString(); - struct.properties.put(_key303, _val304); + _key311 = iprot.readString(); + _val312 = iprot.readString(); + struct.properties.put(_key311, _val312); } iprot.readMapEnd(); } @@ -378,10 +378,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter306 : struct.properties.entrySet()) + for (Map.Entry _iter314 : struct.properties.entrySet()) { - oprot.writeString(_iter306.getKey()); - oprot.writeString(_iter306.getValue()); + oprot.writeString(_iter314.getKey()); + oprot.writeString(_iter314.getValue()); } oprot.writeMapEnd(); } @@ -412,10 +412,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter307 : struct.properties.entrySet()) + for (Map.Entry _iter315 : struct.properties.entrySet()) { - oprot.writeString(_iter307.getKey()); - oprot.writeString(_iter307.getValue()); + oprot.writeString(_iter315.getKey()); + oprot.writeString(_iter315.getValue()); } } } @@ -427,15 +427,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext s BitSet incoming = 
iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map308 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map308.size); - String _key309; - String _val310; - for (int _i311 = 0; _i311 < _map308.size; ++_i311) + org.apache.thrift.protocol.TMap _map316 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map316.size); + String _key317; + String _val318; + for (int _i319 = 0; _i319 < _map316.size; ++_i319) { - _key309 = iprot.readString(); - _val310 = iprot.readString(); - struct.properties.put(_key309, _val310); + _key317 = iprot.readString(); + _val318 = iprot.readString(); + struct.properties.put(_key317, _val318); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java index 807f8263a0..b95efc7e87 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRe case 1: // SCHEMA_VERSIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.schemaVersions = new ArrayList(_list888.size); - SchemaVersionDescriptor _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.schemaVersions = new ArrayList(_list896.size); + SchemaVersionDescriptor _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = new SchemaVersionDescriptor(); - _elem889.read(iprot); - struct.schemaVersions.add(_elem889); + _elem897 = new SchemaVersionDescriptor(); + _elem897.read(iprot); + struct.schemaVersions.add(_elem897); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsR oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size())); - for (SchemaVersionDescriptor _iter891 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter899 : struct.schemaVersions) { - _iter891.write(oprot); + _iter899.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRe if (struct.isSetSchemaVersions()) { { oprot.writeI32(struct.schemaVersions.size()); - for (SchemaVersionDescriptor _iter892 : struct.schemaVersions) + for (SchemaVersionDescriptor _iter900 : struct.schemaVersions) { - _iter892.write(oprot); + _iter900.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRes BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.schemaVersions = new ArrayList(_list893.size); - 
SchemaVersionDescriptor _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.schemaVersions = new ArrayList(_list901.size); + SchemaVersionDescriptor _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = new SchemaVersionDescriptor(); - _elem894.read(iprot); - struct.schemaVersions.add(_elem894); + _elem902 = new SchemaVersionDescriptor(); + _elem902.read(iprot); + struct.schemaVersions.add(_elem902); } } struct.setSchemaVersionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 58b1d7cf92..ddc0a6a2b1 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionVals", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String dbName; // optional private String tableName; // optional private List partitionVals; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ DATA((short)2, "data"), DB_NAME((short)3, "dbName"), TABLE_NAME((short)4, "tableName"), - PARTITION_VALS((short)5, "partitionVals"); + PARTITION_VALS((short)5, "partitionVals"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 5: // PARTITION_VALS return PARTITION_VALS; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __SUCCESSFUL_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.PARTITION_VALS}; + private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.PARTITION_VALS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { tmpMap.put(_Fields.PARTITION_VALS, new org.apache.thrift.meta_data.FieldMetaData("partitionVals", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FireEventRequest.class, metaDataMap); } @@ -180,6 +187,9 @@ public FireEventRequest(FireEventRequest other) { List __this__partitionVals = new ArrayList(other.partitionVals); this.partitionVals = __this__partitionVals; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public FireEventRequest deepCopy() { @@ -194,6 +204,7 @@ public void clear() { this.dbName = null; this.tableName = null; this.partitionVals = null; + this.catName = null; } public boolean isSuccessful() { @@ -325,6 +336,29 @@ public void setPartitionValsIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESSFUL: @@ -367,6 +401,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -387,6 +429,9 @@ public Object getFieldValue(_Fields field) { case PARTITION_VALS: return getPartitionVals(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -408,6 +453,8 @@ public boolean isSet(_Fields field) { return isSetTableName(); case PARTITION_VALS: return 
isSetPartitionVals(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -470,6 +517,15 @@ public boolean equals(FireEventRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -502,6 +558,11 @@ public int hashCode() { if (present_partitionVals) list.add(partitionVals); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -563,6 +624,16 @@ public int compareTo(FireEventRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -624,6 +695,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -713,13 +794,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list708 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list708.size); - String _elem709; - for (int _i710 = 0; _i710 < _list708.size; ++_i710) + org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list716.size); + String _elem717; + for (int _i718 = 0; _i718 < _list716.size; ++_i718) { - _elem709 = iprot.readString(); - struct.partitionVals.add(_elem709); + _elem717 = iprot.readString(); + struct.partitionVals.add(_elem717); } iprot.readListEnd(); } @@ -728,6 +809,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -768,15 +857,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter711 : struct.partitionVals) + for (String _iter719 : struct.partitionVals) { - oprot.writeString(_iter711); + oprot.writeString(_iter719); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -806,7 +902,10 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -816,12 +915,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter712 : struct.partitionVals) + for (String _iter720 : struct.partitionVals) { - oprot.writeString(_iter712); + oprot.writeString(_iter720); } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -832,7 +934,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str struct.data = new FireEventRequestData(); struct.data.read(iprot); struct.setDataIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -843,17 +945,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list713.size); - String _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list721.size); + String _elem722; + for (int _i723 = 0; _i723 < _list721.size; ++_i723) { - _elem714 = iprot.readString(); - struct.partitionVals.add(_elem714); + _elem722 = iprot.readString(); + struct.partitionVals.add(_elem722); } } struct.setPartitionValsIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java index e4882c7ab6..2f2fcfa066 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField PARENT_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FOREIGN_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_db_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField FOREIGN_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String parent_tbl_name; // required private String foreign_db_name; // required private 
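FireEventRequest follows the same pattern, with catName at field id 6 and the tuple BitSet widened from 3 to 4 flags. A hedged construction sketch; it assumes the usual generated helpers for the required fields, namely a FireEventRequest(boolean, FireEventRequestData) constructor, the FireEventRequestData.insertData(...) union factory, and an InsertEventRequestData(List<String>) constructor, and the HDFS path and names are placeholders:

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.FireEventRequest;
    import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
    import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    public class FireEventWithCatalog {
      public static void main(String[] args) {
        // One inserted file; the path is a placeholder.
        InsertEventRequestData insert = new InsertEventRequestData(
            Arrays.asList("hdfs://nn:8020/warehouse/web_logs/part-00000"));
        FireEventRequest req =
            new FireEventRequest(true, FireEventRequestData.insertData(insert));
        req.setDbName("default");
        req.setTableName("web_logs");
        req.setCatName(DEFAULT_CATALOG_NAME); // new optional field id 6
      }
    }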
String foreign_tbl_name; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { PARENT_DB_NAME((short)1, "parent_db_name"), PARENT_TBL_NAME((short)2, "parent_tbl_name"), FOREIGN_DB_NAME((short)3, "foreign_db_name"), - FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"); + FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return FOREIGN_DB_NAME; case 4: // FOREIGN_TBL_NAME return FOREIGN_TBL_NAME; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,6 +127,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -133,6 +139,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.FOREIGN_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ForeignKeysRequest.class, metaDataMap); } @@ -169,6 +177,9 @@ public ForeignKeysRequest(ForeignKeysRequest other) { if (other.isSetForeign_tbl_name()) { this.foreign_tbl_name = other.foreign_tbl_name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ForeignKeysRequest deepCopy() { @@ -181,6 +192,7 @@ public void clear() { this.parent_tbl_name = null; this.foreign_db_name = null; this.foreign_tbl_name = null; + this.catName = null; } public String getParent_db_name() { @@ -275,6 +287,29 @@ public void setForeign_tbl_nameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PARENT_DB_NAME: @@ -309,6 +344,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -326,6 +369,9 @@ public Object getFieldValue(_Fields field) { case FOREIGN_TBL_NAME: return getForeign_tbl_name(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -345,6 +391,8 @@ public boolean isSet(_Fields field) { return isSetForeign_db_name(); case FOREIGN_TBL_NAME: 
return isSetForeign_tbl_name(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -398,6 +446,15 @@ public boolean equals(ForeignKeysRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -425,6 +482,11 @@ public int hashCode() { if (present_foreign_tbl_name) list.add(foreign_tbl_name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -476,6 +538,16 @@ public int compareTo(ForeignKeysRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -527,6 +599,16 @@ public String toString() { sb.append(this.foreign_tbl_name); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -602,6 +684,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysRequest org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -635,6 +725,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRequest oprot.writeString(struct.foreign_tbl_name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -665,7 +762,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest if (struct.isSetForeign_tbl_name()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetCatName()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetParent_db_name()) { oprot.writeString(struct.parent_db_name); } @@ -678,12 +778,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest if (struct.isSetForeign_tbl_name()) { oprot.writeString(struct.foreign_tbl_name); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.parent_db_name = iprot.readString(); struct.setParent_db_nameIsSet(true); @@ -700,6 +803,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
ForeignKeysRequest s struct.foreign_tbl_name = iprot.readString(); struct.setForeign_tbl_nameIsSet(true); } + if (incoming.get(4)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java index 081adeb1f0..2890506453 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysResponse case 1: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list320 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list320.size); - SQLForeignKey _elem321; - for (int _i322 = 0; _i322 < _list320.size; ++_i322) + org.apache.thrift.protocol.TList _list328 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list328.size); + SQLForeignKey _elem329; + for (int _i330 = 0; _i330 < _list328.size; ++_i330) { - _elem321 = new SQLForeignKey(); - _elem321.read(iprot); - struct.foreignKeys.add(_elem321); + _elem329 = new SQLForeignKey(); + _elem329.read(iprot); + struct.foreignKeys.add(_elem329); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRespons oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter323 : struct.foreignKeys) + for (SQLForeignKey _iter331 : struct.foreignKeys) { - _iter323.write(oprot); + _iter331.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter324 : struct.foreignKeys) + for (SQLForeignKey _iter332 : struct.foreignKeys) { - _iter324.write(oprot); + _iter332.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list325.size); - SQLForeignKey _elem326; - for (int _i327 = 0; _i327 < _list325.size; ++_i327) + org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list333.size); + SQLForeignKey _elem334; + for (int _i335 = 0; _i335 < _list333.size; ++_i335) { - _elem326 = new SQLForeignKey(); - _elem326.read(iprot); - struct.foreignKeys.add(_elem326); + _elem334 = new SQLForeignKey(); + _elem334.read(iprot); + struct.foreignKeys.add(_elem334); } } struct.setForeignKeysIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java 
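ForeignKeysRequest acquires catName as Thrift field 5, marked OPTIONAL in both the field metadata and the new optionals[] array, with the full complement of generated accessors (setCatName, unsetCatName, isSetCatName) and guarded clauses in equals, hashCode, compareTo, and toString that engage only when the field is set. A minimal caller-side sketch of the resulting API; the "hive" literal is an assumed default catalog name, not something these hunks define:

    import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;

    public class ForeignKeysRequestSketch {
      static ForeignKeysRequest newRequest(String parentDb, String parentTbl,
                                           String foreignDb, String foreignTbl) {
        ForeignKeysRequest req = new ForeignKeysRequest();
        req.setParent_db_name(parentDb);     // pre-existing fields, unchanged
        req.setParent_tbl_name(parentTbl);
        req.setForeign_db_name(foreignDb);
        req.setForeign_tbl_name(foreignTbl);
        if (!req.isSetCatName()) {           // new optional field (id 5)
          req.setCatName("hive");            // assumed default catalog name
        }
        return req;
      }
    }

Because the field is optional, an unset catName costs nothing on the wire: the standard scheme writes the field only inside the isSetCatName() guard, and the tuple scheme leaves bit 4 of its presence BitSet clear.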
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index 2a6c28d311..a1c0de95bd 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)6); private static final org.apache.thrift.protocol.TField FUNCTION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("functionType", org.apache.thrift.protocol.TType.I32, (short)7); private static final org.apache.thrift.protocol.TField RESOURCE_URIS_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceUris", org.apache.thrift.protocol.TType.LIST, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private int createTime; // required private FunctionType functionType; // required private List resourceUris; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -79,7 +81,8 @@ * @see FunctionType */ FUNCTION_TYPE((short)7, "functionType"), - RESOURCE_URIS((short)8, "resourceUris"); + RESOURCE_URIS((short)8, "resourceUris"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -110,6 +113,8 @@ public static _Fields findByThriftId(int fieldId) { return FUNCTION_TYPE; case 8: // RESOURCE_URIS return RESOURCE_URIS; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -152,6 +157,7 @@ public String getFieldName() { // isset id assignments private static final int __CREATETIME_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -172,6 +178,8 @@ public String getFieldName() { tmpMap.put(_Fields.RESOURCE_URIS, new org.apache.thrift.meta_data.FieldMetaData("resourceUris", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ResourceUri.class)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Function.class, metaDataMap); } @@ -232,6 +240,9 @@ public Function(Function other) { } this.resourceUris = __this__resourceUris; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public Function deepCopy() { @@ -249,6 +260,7 @@ public void clear() { this.createTime = 0; this.functionType = null; this.resourceUris = null; + 
this.catName = null; } public String getFunctionName() { @@ -465,6 +477,29 @@ public void setResourceUrisIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case FUNCTION_NAME: @@ -531,6 +566,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -560,6 +603,9 @@ public Object getFieldValue(_Fields field) { case RESOURCE_URIS: return getResourceUris(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -587,6 +633,8 @@ public boolean isSet(_Fields field) { return isSetFunctionType(); case RESOURCE_URIS: return isSetResourceUris(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -676,6 +724,15 @@ public boolean equals(Function that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -723,6 +780,11 @@ public int hashCode() { if (present_resourceUris) list.add(resourceUris); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -814,6 +876,16 @@ public int compareTo(Function other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -893,6 +965,16 @@ public String toString() { sb.append(this.resourceUris); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -997,14 +1079,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list538 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list538.size); - ResourceUri _elem539; - for (int _i540 = 0; _i540 < _list538.size; ++_i540) + org.apache.thrift.protocol.TList _list546 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list546.size); + ResourceUri _elem547; + for (int _i548 = 0; _i548 < _list546.size; ++_i548) { - _elem539 = new ResourceUri(); - _elem539.read(iprot); - struct.resourceUris.add(_elem539); + _elem547 = new ResourceUri(); + _elem547.read(iprot); + struct.resourceUris.add(_elem547); } iprot.readListEnd(); } @@ -1013,6 +1095,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1063,14 +1153,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter541 : struct.resourceUris) + for (ResourceUri _iter549 : struct.resourceUris) { - _iter541.write(oprot); + _iter549.write(oprot); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1113,7 +1210,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetFunctionName()) { oprot.writeString(struct.functionName); } @@ -1138,18 +1238,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter542 : struct.resourceUris) + for (ResourceUri _iter550 : struct.resourceUris) { - _iter542.write(oprot); + _iter550.write(oprot); } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.functionName = iprot.readString(); struct.setFunctionNameIsSet(true); @@ -1180,18 +1283,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list543.size); - ResourceUri _elem544; - for (int _i545 = 0; _i545 < _list543.size; ++_i545) + org.apache.thrift.protocol.TList _list551 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourceUris = new ArrayList(_list551.size); + ResourceUri _elem552; + for (int _i553 = 0; _i553 < _list551.size; ++_i553) { - _elem544 = new ResourceUri(); - _elem544.read(iprot); - struct.resourceUris.add(_elem544); + _elem552 = new ResourceUri(); + _elem552.read(iprot); + struct.resourceUris.add(_elem552); } } struct.setResourceUrisIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java 
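Function follows the same recipe, appending catName as optional field 9. The two serialization schemes age differently here: in the standard scheme, an older reader that meets the unknown field 9 falls through to the generated default branch and calls TProtocolUtil.skip, so it tolerates newer writers; the tuple scheme instead widens its presence encoding from readBitSet(8) to readBitSet(9) and is only safe when both endpoints were generated from the same IDL, the usual caveat for TTupleProtocol rather than anything specific to this patch. A sketch of the positional presence encoding the tuple scheme relies on; the helper name is illustrative:

    import java.util.BitSet;

    public class TuplePresenceSketch {
      // Presence of each optional field is recorded by position, so field
      // order is part of the wire contract for the tuple scheme.
      static BitSet presenceBits(boolean... isSet) {
        BitSet optionals = new BitSet();
        for (int i = 0; i < isSet.length; i++) {
          if (isSet[i]) {
            optionals.set(i);    // e.g. bit 8 for catName, the ninth field
          }
        }
        return optionals;        // writeBitSet(optionals, isSet.length) follows
      }
    }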
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index 522fb92bf9..0c5f62b9e2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list776 = iprot.readListBegin(); - struct.functions = new ArrayList(_list776.size); - Function _elem777; - for (int _i778 = 0; _i778 < _list776.size; ++_i778) + org.apache.thrift.protocol.TList _list784 = iprot.readListBegin(); + struct.functions = new ArrayList(_list784.size); + Function _elem785; + for (int _i786 = 0; _i786 < _list784.size; ++_i786) { - _elem777 = new Function(); - _elem777.read(iprot); - struct.functions.add(_elem777); + _elem785 = new Function(); + _elem785.read(iprot); + struct.functions.add(_elem785); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter779 : struct.functions) + for (Function _iter787 : struct.functions) { - _iter779.write(oprot); + _iter787.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter780 : struct.functions) + for (Function _iter788 : struct.functions) { - _iter780.write(oprot); + _iter788.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list781 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list781.size); - Function _elem782; - for (int _i783 = 0; _i783 < _list781.size; ++_i783) + org.apache.thrift.protocol.TList _list789 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list789.size); + Function _elem790; + for (int _i791 = 0; _i791 < _list789.size; ++_i791) { - _elem782 = new Function(); - _elem782.read(iprot); - struct.functions.add(_elem782); + _elem790 = new Function(); + _elem790.read(iprot); + struct.functions.add(_elem790); } } struct.setFunctionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java new file mode 100644 index 0000000000..c0e6240736 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java @@ -0,0 +1,395 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import 
org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogRequest"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetCatalogRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetCatalogRequestTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogRequest.class, metaDataMap); + } + + public GetCatalogRequest() { + } + + public GetCatalogRequest( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. + */ + public GetCatalogRequest(GetCatalogRequest other) { + if (other.isSetName()) { + this.name = other.name; + } + } + + public GetCatalogRequest deepCopy() { + return new GetCatalogRequest(this); + } + + @Override + public void clear() { + this.name = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetCatalogRequest) + return this.equals((GetCatalogRequest)that); + return false; + } + + public boolean equals(GetCatalogRequest that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + return list.hashCode(); + } + + @Override + public int compareTo(GetCatalogRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int 
lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetCatalogRequest("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetCatalogRequestStandardSchemeFactory implements SchemeFactory { + public GetCatalogRequestStandardScheme getScheme() { + return new GetCatalogRequestStandardScheme(); + } + } + + private static class GetCatalogRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetCatalogRequestTupleSchemeFactory implements SchemeFactory { + public GetCatalogRequestTupleScheme getScheme() { + return new GetCatalogRequestTupleScheme(); + } + } + + private static class GetCatalogRequestTupleScheme 
extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java new file mode 100644 index 0000000000..096f5efd3a --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java @@ -0,0 +1,400 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogResponse"); + + private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetCatalogResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetCatalogResponseTupleSchemeFactory()); + } + + private Catalog catalog; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CATALOG((short)1, "catalog"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CATALOG + return CATALOG; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogResponse.class, metaDataMap); + } + + public GetCatalogResponse() { + } + + public GetCatalogResponse( + Catalog catalog) + { + this(); + this.catalog = catalog; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetCatalogResponse(GetCatalogResponse other) { + if (other.isSetCatalog()) { + this.catalog = new Catalog(other.catalog); + } + } + + public GetCatalogResponse deepCopy() { + return new GetCatalogResponse(this); + } + + @Override + public void clear() { + this.catalog = null; + } + + public Catalog getCatalog() { + return this.catalog; + } + + public void setCatalog(Catalog catalog) { + this.catalog = catalog; + } + + public void unsetCatalog() { + this.catalog = null; + } + + /** Returns true if field catalog is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalog() { + return this.catalog != null; + } + + public void setCatalogIsSet(boolean value) { + if (!value) { + this.catalog = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CATALOG: + if (value == null) { + unsetCatalog(); + } else { + setCatalog((Catalog)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CATALOG: + return getCatalog(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CATALOG: + return isSetCatalog(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetCatalogResponse) + return this.equals((GetCatalogResponse)that); + return false; + } + + public boolean equals(GetCatalogResponse that) { + if (that == null) + return false; + + boolean this_present_catalog = true && this.isSetCatalog(); + boolean that_present_catalog = true && that.isSetCatalog(); + if (this_present_catalog || that_present_catalog) { + if (!(this_present_catalog && that_present_catalog)) + return false; + if (!this.catalog.equals(that.catalog)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catalog = true && (isSetCatalog()); + list.add(present_catalog); + if (present_catalog) + list.add(catalog); + + return list.hashCode(); + } + + @Override + public int compareTo(GetCatalogResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalog()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetCatalogResponse("); + boolean first = true; + + sb.append("catalog:"); + if (this.catalog == null) { + sb.append("null"); + } else { + sb.append(this.catalog); + } + first = false; + 
sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catalog != null) { + catalog.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetCatalogResponseStandardSchemeFactory implements SchemeFactory { + public GetCatalogResponseStandardScheme getScheme() { + return new GetCatalogResponseStandardScheme(); + } + } + + private static class GetCatalogResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CATALOG + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catalog != null) { + oprot.writeFieldBegin(CATALOG_FIELD_DESC); + struct.catalog.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetCatalogResponseTupleSchemeFactory implements SchemeFactory { + public GetCatalogResponseTupleScheme getScheme() { + return new GetCatalogResponseTupleScheme(); + } + } + + private static class GetCatalogResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatalog()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatalog()) { + struct.catalog.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java 
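GetCatalogRequest and GetCatalogResponse, both new files, are thin single-field wrappers: a catalog name in, a Catalog struct out, with GetCatalogResponse.validate() cascading into catalog.validate(). The service method that carries them is declared elsewhere in the patch, so the client interface below is a hypothetical stand-in; only the two structs and their accessors come from the code above:

    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.hadoop.hive.metastore.api.GetCatalogRequest;
    import org.apache.hadoop.hive.metastore.api.GetCatalogResponse;
    import org.apache.thrift.TException;

    public class GetCatalogSketch {
      // Hypothetical stand-in for the metastore RPC that carries these structs.
      interface CatalogClient {
        GetCatalogResponse getCatalog(GetCatalogRequest req) throws TException;
      }

      static Catalog fetch(CatalogClient client, String name) throws TException {
        GetCatalogResponse resp = client.getCatalog(new GetCatalogRequest(name));
        return resp.isSetCatalog() ? resp.getCatalog() : null;
      }
    }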
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java new file mode 100644 index 0000000000..aafd528111 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java @@ -0,0 +1,444 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogsResponse implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogsResponse"); + + private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetCatalogsResponseStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetCatalogsResponseTupleSchemeFactory()); + } + + private List names; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAMES((short)1, "names"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAMES + return NAMES; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAMES, new org.apache.thrift.meta_data.FieldMetaData("names", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogsResponse.class, metaDataMap); + } + + public GetCatalogsResponse() { + } + + public GetCatalogsResponse( + List names) + { + this(); + this.names = names; + } + + /** + * Performs a deep copy on other. + */ + public GetCatalogsResponse(GetCatalogsResponse other) { + if (other.isSetNames()) { + List __this__names = new ArrayList(other.names); + this.names = __this__names; + } + } + + public GetCatalogsResponse deepCopy() { + return new GetCatalogsResponse(this); + } + + @Override + public void clear() { + this.names = null; + } + + public int getNamesSize() { + return (this.names == null) ? 0 : this.names.size(); + } + + public java.util.Iterator getNamesIterator() { + return (this.names == null) ? 
null : this.names.iterator(); + } + + public void addToNames(String elem) { + if (this.names == null) { + this.names = new ArrayList(); + } + this.names.add(elem); + } + + public List getNames() { + return this.names; + } + + public void setNames(List names) { + this.names = names; + } + + public void unsetNames() { + this.names = null; + } + + /** Returns true if field names is set (has been assigned a value) and false otherwise */ + public boolean isSetNames() { + return this.names != null; + } + + public void setNamesIsSet(boolean value) { + if (!value) { + this.names = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAMES: + if (value == null) { + unsetNames(); + } else { + setNames((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAMES: + return getNames(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAMES: + return isSetNames(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetCatalogsResponse) + return this.equals((GetCatalogsResponse)that); + return false; + } + + public boolean equals(GetCatalogsResponse that) { + if (that == null) + return false; + + boolean this_present_names = true && this.isSetNames(); + boolean that_present_names = true && that.isSetNames(); + if (this_present_names || that_present_names) { + if (!(this_present_names && that_present_names)) + return false; + if (!this.names.equals(that.names)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_names = true && (isSetNames()); + list.add(present_names); + if (present_names) + list.add(names); + + return list.hashCode(); + } + + @Override + public int compareTo(GetCatalogsResponse other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetNames()).compareTo(other.isSetNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.names, other.names); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetCatalogsResponse("); + boolean first = true; + + sb.append("names:"); + if (this.names == null) { + sb.append("null"); + } else { + sb.append(this.names); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void 
writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetCatalogsResponseStandardSchemeFactory implements SchemeFactory { + public GetCatalogsResponseStandardScheme getScheme() { + return new GetCatalogsResponseStandardScheme(); + } + } + + private static class GetCatalogsResponseStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogsResponse struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list94 = iprot.readListBegin(); + struct.names = new ArrayList(_list94.size); + String _elem95; + for (int _i96 = 0; _i96 < _list94.size; ++_i96) + { + _elem95 = iprot.readString(); + struct.names.add(_elem95); + } + iprot.readListEnd(); + } + struct.setNamesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogsResponse struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.names != null) { + oprot.writeFieldBegin(NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); + for (String _iter97 : struct.names) + { + oprot.writeString(_iter97); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetCatalogsResponseTupleSchemeFactory implements SchemeFactory { + public GetCatalogsResponseTupleScheme getScheme() { + return new GetCatalogsResponseTupleScheme(); + } + } + + private static class GetCatalogsResponseTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetNames()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetNames()) { + { + oprot.writeI32(struct.names.size()); + for (String _iter98 : struct.names) + { + oprot.writeString(_iter98); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogsResponse struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + { + 
org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list99.size); + String _elem100; + for (int _i101 = 0; _i101 < _list99.size; ++_i101) + { + _elem100 = iprot.readString(); + struct.names.add(_elem100); + } + } + struct.setNamesIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index f5f1eb33c8..b64dea4c3c 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list726 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list726.size); - long _elem727; - for (int _i728 = 0; _i728 < _list726.size; ++_i728) + org.apache.thrift.protocol.TList _list734 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list734.size); + long _elem735; + for (int _i736 = 0; _i736 < _list734.size; ++_i736) { - _elem727 = iprot.readI64(); - struct.fileIds.add(_elem727); + _elem735 = iprot.readI64(); + struct.fileIds.add(_elem735); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter729 : struct.fileIds) + for (long _iter737 : struct.fileIds) { - oprot.writeI64(_iter729); + oprot.writeI64(_iter737); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter730 : struct.fileIds) + for (long _iter738 : struct.fileIds) { - oprot.writeI64(_iter730); + oprot.writeI64(_iter738); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list731.size); - long _elem732; - for (int _i733 = 0; _i733 < _list731.size; ++_i733) + org.apache.thrift.protocol.TList _list739 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list739.size); + long _elem740; + for (int _i741 = 0; _i741 < _list739.size; ++_i741) { - _elem732 = iprot.readI64(); - struct.fileIds.add(_elem732); + _elem740 = iprot.readI64(); + struct.fileIds.add(_elem740); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java 
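Rounding out the new catalog API, GetCatalogsResponse (whose generated body ends just above, before the GetFileMetadataByExprRequest hunks) carries a list of catalog names with the usual generated list helpers: addToNames, getNamesSize, getNamesIterator, and a getNames that returns the underlying list of strings. A short sketch; the names are illustrative:

    import org.apache.hadoop.hive.metastore.api.GetCatalogsResponse;

    public class GetCatalogsSketch {
      static void demo() {
        GetCatalogsResponse resp = new GetCatalogsResponse();
        resp.addToNames("hive");                 // illustrative catalog names
        resp.addToNames("spark");
        for (String name : resp.getNames()) {    // generated field is a list of strings
          System.out.println(name);
        }
      }
    }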
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 370ab66e19..a01a36616f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map716 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map716.size); - long _key717; - MetadataPpdResult _val718; - for (int _i719 = 0; _i719 < _map716.size; ++_i719) + org.apache.thrift.protocol.TMap _map724 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map724.size); + long _key725; + MetadataPpdResult _val726; + for (int _i727 = 0; _i727 < _map724.size; ++_i727) { - _key717 = iprot.readI64(); - _val718 = new MetadataPpdResult(); - _val718.read(iprot); - struct.metadata.put(_key717, _val718); + _key725 = iprot.readI64(); + _val726 = new MetadataPpdResult(); + _val726.read(iprot); + struct.metadata.put(_key725, _val726); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter720 : struct.metadata.entrySet()) + for (Map.Entry _iter728 : struct.metadata.entrySet()) { - oprot.writeI64(_iter720.getKey()); - _iter720.getValue().write(oprot); + oprot.writeI64(_iter728.getKey()); + _iter728.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter721 : struct.metadata.entrySet()) + for (Map.Entry _iter729 : struct.metadata.entrySet()) { - oprot.writeI64(_iter721.getKey()); - _iter721.getValue().write(oprot); + oprot.writeI64(_iter729.getKey()); + _iter729.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map722 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map722.size); - long _key723; - MetadataPpdResult _val724; - for (int _i725 = 0; _i725 < _map722.size; ++_i725) + org.apache.thrift.protocol.TMap _map730 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map730.size); + long _key731; + MetadataPpdResult _val732; + for (int _i733 = 0; _i733 < _map730.size; ++_i733) { - _key723 = iprot.readI64(); - _val724 = new MetadataPpdResult(); - _val724.read(iprot); - struct.metadata.put(_key723, _val724); + _key731 = iprot.readI64(); + _val732 = new MetadataPpdResult(); + _val732.read(iprot); + 
struct.metadata.put(_key731, _val732); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index c74c2b0d74..4541cf404b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list744 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list744.size); - long _elem745; - for (int _i746 = 0; _i746 < _list744.size; ++_i746) + org.apache.thrift.protocol.TList _list752 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list752.size); + long _elem753; + for (int _i754 = 0; _i754 < _list752.size; ++_i754) { - _elem745 = iprot.readI64(); - struct.fileIds.add(_elem745); + _elem753 = iprot.readI64(); + struct.fileIds.add(_elem753); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter747 : struct.fileIds) + for (long _iter755 : struct.fileIds) { - oprot.writeI64(_iter747); + oprot.writeI64(_iter755); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter748 : struct.fileIds) + for (long _iter756 : struct.fileIds) { - oprot.writeI64(_iter748); + oprot.writeI64(_iter756); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list749 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list749.size); - long _elem750; - for (int _i751 = 0; _i751 < _list749.size; ++_i751) + org.apache.thrift.protocol.TList _list757 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list757.size); + long _elem758; + for (int _i759 = 0; _i759 < _list757.size; ++_i759) { - _elem750 = iprot.readI64(); - struct.fileIds.add(_elem750); + _elem758 = iprot.readI64(); + struct.fileIds.add(_elem758); } } struct.setFileIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 6431b6db20..3efb371223 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ 
public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map734 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map734.size); - long _key735; - ByteBuffer _val736; - for (int _i737 = 0; _i737 < _map734.size; ++_i737) + org.apache.thrift.protocol.TMap _map742 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map742.size); + long _key743; + ByteBuffer _val744; + for (int _i745 = 0; _i745 < _map742.size; ++_i745) { - _key735 = iprot.readI64(); - _val736 = iprot.readBinary(); - struct.metadata.put(_key735, _val736); + _key743 = iprot.readI64(); + _val744 = iprot.readBinary(); + struct.metadata.put(_key743, _val744); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (Map.Entry _iter738 : struct.metadata.entrySet()) + for (Map.Entry _iter746 : struct.metadata.entrySet()) { - oprot.writeI64(_iter738.getKey()); - oprot.writeBinary(_iter738.getValue()); + oprot.writeI64(_iter746.getKey()); + oprot.writeBinary(_iter746.getValue()); } oprot.writeMapEnd(); } @@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter739 : struct.metadata.entrySet()) + for (Map.Entry _iter747 : struct.metadata.entrySet()) { - oprot.writeI64(_iter739.getKey()); - oprot.writeBinary(_iter739.getValue()); + oprot.writeI64(_iter747.getKey()); + oprot.writeBinary(_iter747.getValue()); } } oprot.writeBool(struct.isSupported); @@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map740 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new HashMap(2*_map740.size); - long _key741; - ByteBuffer _val742; - for (int _i743 = 0; _i743 < _map740.size; ++_i743) + org.apache.thrift.protocol.TMap _map748 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new HashMap(2*_map748.size); + long _key749; + ByteBuffer _val750; + for (int _i751 = 0; _i751 < _map748.size; ++_i751) { - _key741 = iprot.readI64(); - _val742 = iprot.readBinary(); - struct.metadata.put(_key741, _val742); + _key749 = iprot.readI64(); + _val750 = iprot.readBinary(); + struct.metadata.put(_key749, _val750); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index a9061ab836..56f239eef9 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -447,14 +447,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list546 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list546.size); - TxnInfo _elem547; - for (int _i548 = 0; _i548 < _list546.size; ++_i548) + org.apache.thrift.protocol.TList _list554 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list554.size); + TxnInfo _elem555; + for (int _i556 = 0; _i556 < _list554.size; ++_i556) { - _elem547 = new TxnInfo(); - _elem547.read(iprot); - struct.open_txns.add(_elem547); + _elem555 = new TxnInfo(); + _elem555.read(iprot); + struct.open_txns.add(_elem555); } iprot.readListEnd(); } @@ -483,9 +483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size())); - for (TxnInfo _iter549 : struct.open_txns) + for (TxnInfo _iter557 : struct.open_txns) { - _iter549.write(oprot); + _iter557.write(oprot); } oprot.writeListEnd(); } @@ -511,9 +511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (TxnInfo _iter550 : struct.open_txns) + for (TxnInfo _iter558 : struct.open_txns) { - _iter550.write(oprot); + _iter558.write(oprot); } } } @@ -524,14 +524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list551 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.open_txns = new ArrayList(_list551.size); - TxnInfo _elem552; - for (int _i553 = 0; _i553 < _list551.size; ++_i553) + org.apache.thrift.protocol.TList _list559 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.open_txns = new ArrayList(_list559.size); + TxnInfo _elem560; + for (int _i561 = 0; _i561 < _list559.size; ++_i561) { - _elem552 = new TxnInfo(); - _elem552.read(iprot); - struct.open_txns.add(_elem552); + _elem560 = new TxnInfo(); + _elem560.read(iprot); + struct.open_txns.add(_elem560); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index 12a125f32a..9688297b81 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -615,13 +615,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list554 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list554.size); - long _elem555; - for (int _i556 = 0; _i556 < _list554.size; ++_i556) + org.apache.thrift.protocol.TList _list562 = iprot.readListBegin(); 
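// Note: the _list/_elem/_iter index shifts in this and the neighboring generated files
// (e.g. _list554 -> _list562, uniformly +8) are purely mechanical: the Thrift generator
// renumbers its temporaries, presumably because this patch adds new generated structs
// earlier in the IDL. The serialization logic itself is unchanged.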
+ struct.open_txns = new ArrayList(_list562.size); + long _elem563; + for (int _i564 = 0; _i564 < _list562.size; ++_i564) { - _elem555 = iprot.readI64(); - struct.open_txns.add(_elem555); + _elem563 = iprot.readI64(); + struct.open_txns.add(_elem563); } iprot.readListEnd(); } @@ -666,9 +666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter557 : struct.open_txns) + for (long _iter565 : struct.open_txns) { - oprot.writeI64(_iter557); + oprot.writeI64(_iter565); } oprot.writeListEnd(); } @@ -704,9 +704,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter558 : struct.open_txns) + for (long _iter566 : struct.open_txns) { - oprot.writeI64(_iter558); + oprot.writeI64(_iter566); } } oprot.writeBinary(struct.abortedBits); @@ -726,13 +726,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list559 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new ArrayList(_list559.size); - long _elem560; - for (int _i561 = 0; _i561 < _list559.size; ++_i561) + org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new ArrayList(_list567.size); + long _elem568; + for (int _i569 = 0; _i569 < _list567.size; ++_i569) { - _elem560 = iprot.readI64(); - struct.open_txns.add(_elem560); + _elem568 = iprot.readI64(); + struct.open_txns.add(_elem568); } } struct.setOpen_txnsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index 708bf90f00..3c88d8fc6d 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private String tblName; // required private ClientCapabilities capabilities; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and 
manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), - CAPABILITIES((short)3, "capabilities"); + CAPABILITIES((short)3, "capabilities"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // CAPABILITIES return CAPABILITIES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,7 +122,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAPABILITIES}; + private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127,6 +132,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap); } @@ -156,6 +163,9 @@ public GetTableRequest(GetTableRequest other) { if (other.isSetCapabilities()) { this.capabilities = new ClientCapabilities(other.capabilities); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetTableRequest deepCopy() { @@ -167,6 +177,7 @@ public void clear() { this.dbName = null; this.tblName = null; this.capabilities = null; + this.catName = null; } public String getDbName() { @@ -238,6 +249,29 @@ public void setCapabilitiesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -264,6 +298,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -278,6 +320,9 @@ public Object getFieldValue(_Fields field) { case CAPABILITIES: return getCapabilities(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -295,6 +340,8 @@ public boolean isSet(_Fields field) { return isSetTblName(); case CAPABILITIES: return isSetCapabilities(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -339,6 +386,15 @@ public boolean equals(GetTableRequest that) { return false; } + boolean this_present_catName = 
true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -361,6 +417,11 @@ public int hashCode() { if (present_capabilities) list.add(capabilities); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -402,6 +463,16 @@ public int compareTo(GetTableRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -447,6 +518,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -526,6 +607,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -556,6 +645,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -579,10 +675,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetCapabilities()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -592,12 +694,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.capabilities = new ClientCapabilities(); struct.capabilities.read(iprot); struct.setCapabilitiesIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 7b9e6c589c..1c9fba8923 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tblNames", org.apache.thrift.protocol.TType.LIST, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private List tblNames; // optional private ClientCapabilities capabilities; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAMES((short)2, "tblNames"), - CAPABILITIES((short)3, "capabilities"); + CAPABILITIES((short)3, "capabilities"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAMES; case 3: // CAPABILITIES return CAPABILITIES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,7 +122,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.TBL_NAMES,_Fields.CAPABILITIES}; + private static final _Fields optionals[] = {_Fields.TBL_NAMES,_Fields.CAPABILITIES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,6 +133,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTablesRequest.class, metaDataMap); } @@ -156,6 +163,9 @@ public GetTablesRequest(GetTablesRequest other) { if (other.isSetCapabilities()) { this.capabilities = new ClientCapabilities(other.capabilities); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetTablesRequest deepCopy() { @@ -167,6 +177,7 @@ public void clear() { this.dbName = null; this.tblNames = null; this.capabilities = null; + this.catName = null; } public String getDbName() { @@ -253,6 +264,29 @@ public void setCapabilitiesIsSet(boolean value) { } } + public 
String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -279,6 +313,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -293,6 +335,9 @@ public Object getFieldValue(_Fields field) { case CAPABILITIES: return getCapabilities(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -310,6 +355,8 @@ public boolean isSet(_Fields field) { return isSetTblNames(); case CAPABILITIES: return isSetCapabilities(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -354,6 +401,15 @@ public boolean equals(GetTablesRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -376,6 +432,11 @@ public int hashCode() { if (present_capabilities) list.add(capabilities); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -417,6 +478,16 @@ public int compareTo(GetTablesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -464,6 +535,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -525,13 +606,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list792 = iprot.readListBegin(); - struct.tblNames = new ArrayList(_list792.size); - String _elem793; - for (int _i794 = 0; _i794 < _list792.size; ++_i794) + org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); + struct.tblNames = new ArrayList(_list800.size); + String _elem801; + for (int _i802 = 0; _i802 < _list800.size; ++_i802) { - _elem793 = iprot.readString(); - struct.tblNames.add(_elem793); + _elem801 = iprot.readString(); + struct.tblNames.add(_elem801); } iprot.readListEnd(); } @@ -549,6 +630,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = 
iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -572,9 +661,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size())); - for (String _iter795 : struct.tblNames) + for (String _iter803 : struct.tblNames) { - oprot.writeString(_iter795); + oprot.writeString(_iter803); } oprot.writeListEnd(); } @@ -588,6 +677,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -613,19 +709,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetCapabilities()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); - for (String _iter796 : struct.tblNames) + for (String _iter804 : struct.tblNames) { - oprot.writeString(_iter796); + oprot.writeString(_iter804); } } } if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -633,16 +735,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str TTupleProtocol iprot = (TTupleProtocol) prot; struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list797 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tblNames = new ArrayList(_list797.size); - String _elem798; - for (int _i799 = 0; _i799 < _list797.size; ++_i799) + org.apache.thrift.protocol.TList _list805 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tblNames = new ArrayList(_list805.size); + String _elem806; + for (int _i807 = 0; _i807 < _list805.size; ++_i807) { - _elem798 = iprot.readString(); - struct.tblNames.add(_elem798); + _elem806 = iprot.readString(); + struct.tblNames.add(_elem806); } } struct.setTblNamesIsSet(true); @@ -652,6 +754,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str struct.capabilities.read(iprot); struct.setCapabilitiesIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java index 3ad5104f16..c020773fd4 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, GetTablesResult str case 1: // TABLES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list800 = iprot.readListBegin(); - struct.tables = new ArrayList<Table>(_list800.size); - Table _elem801; - for (int _i802 = 0; _i802 < _list800.size; ++_i802) + org.apache.thrift.protocol.TList _list808 = iprot.readListBegin(); + struct.tables = new ArrayList<Table>(_list808.size); + Table _elem809; + for (int _i810 = 0; _i810 < _list808.size; ++_i810) { - _elem801 = new Table(); - _elem801.read(iprot); - struct.tables.add(_elem801); + _elem809 = new Table(); + _elem809.read(iprot); + struct.tables.add(_elem809); } iprot.readListEnd(); }
@@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult st oprot.writeFieldBegin(TABLES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size())); - for (Table _iter803 : struct.tables) + for (Table _iter811 : struct.tables) { - _iter803.write(oprot); + _iter811.write(oprot); } oprot.writeListEnd(); }
@@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tables.size()); - for (Table _iter804 : struct.tables) + for (Table _iter812 : struct.tables) { - _iter804.write(oprot); + _iter812.write(oprot); } } }
@@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult str public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list805 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tables = new ArrayList<Table>
(_list805.size); - Table _elem806; - for (int _i807 = 0; _i807 < _list805.size; ++_i807) + org.apache.thrift.protocol.TList _list813 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tables = new ArrayList<Table>
(_list813.size); + Table _elem814; + for (int _i815 = 0; _i815 < _list813.size; ++_i815) { - _elem806 = new Table(); - _elem806.read(iprot); - struct.tables.add(_elem806); + _elem814 = new Table(); + _elem814.read(iprot); + struct.tables.add(_elem814); } } struct.setTablesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java index f3db7ba467..68256c7850 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java @@ -436,13 +436,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsReq case 1: // FULL_TABLE_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list578 = iprot.readListBegin(); - struct.fullTableNames = new ArrayList(_list578.size); - String _elem579; - for (int _i580 = 0; _i580 < _list578.size; ++_i580) + org.apache.thrift.protocol.TList _list586 = iprot.readListBegin(); + struct.fullTableNames = new ArrayList(_list586.size); + String _elem587; + for (int _i588 = 0; _i588 < _list586.size; ++_i588) { - _elem579 = iprot.readString(); - struct.fullTableNames.add(_elem579); + _elem587 = iprot.readString(); + struct.fullTableNames.add(_elem587); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(FULL_TABLE_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fullTableNames.size())); - for (String _iter581 : struct.fullTableNames) + for (String _iter589 : struct.fullTableNames) { - oprot.writeString(_iter581); + oprot.writeString(_iter589); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fullTableNames.size()); - for (String _iter582 : struct.fullTableNames) + for (String _iter590 : struct.fullTableNames) { - oprot.writeString(_iter582); + oprot.writeString(_iter590); } } oprot.writeString(struct.validTxnList); @@ -520,13 +520,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsReq public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.fullTableNames = new ArrayList(_list583.size); - String _elem584; - for (int _i585 = 0; _i585 < _list583.size; ++_i585) + org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.fullTableNames = new ArrayList(_list591.size); + String _elem592; + for (int _i593 = 0; _i593 < _list591.size; ++_i593) { - _elem584 = iprot.readString(); - struct.fullTableNames.add(_elem584); + _elem592 = iprot.readString(); + struct.fullTableNames.add(_elem592); } } struct.setFullTableNamesIsSet(true); diff --git 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java index e0b0dca2ef..5512fb4c1e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRes case 1: // TBL_VALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list594 = iprot.readListBegin(); - struct.tblValidWriteIds = new ArrayList(_list594.size); - TableValidWriteIds _elem595; - for (int _i596 = 0; _i596 < _list594.size; ++_i596) + org.apache.thrift.protocol.TList _list602 = iprot.readListBegin(); + struct.tblValidWriteIds = new ArrayList(_list602.size); + TableValidWriteIds _elem603; + for (int _i604 = 0; _i604 < _list602.size; ++_i604) { - _elem595 = new TableValidWriteIds(); - _elem595.read(iprot); - struct.tblValidWriteIds.add(_elem595); + _elem603 = new TableValidWriteIds(); + _elem603.read(iprot); + struct.tblValidWriteIds.add(_elem603); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRe oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size())); - for (TableValidWriteIds _iter597 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter605 : struct.tblValidWriteIds) { - _iter597.write(oprot); + _iter605.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tblValidWriteIds.size()); - for (TableValidWriteIds _iter598 : struct.tblValidWriteIds) + for (TableValidWriteIds _iter606 : struct.tblValidWriteIds) { - _iter598.write(oprot); + _iter606.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRes public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tblValidWriteIds = new ArrayList(_list599.size); - TableValidWriteIds _elem600; - for (int _i601 = 0; _i601 < _list599.size; ++_i601) + org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tblValidWriteIds = new ArrayList(_list607.size); + TableValidWriteIds _elem608; + for (int _i609 = 0; _i609 < _list607.size; ++_i609) { - _elem600 = new TableValidWriteIds(); - _elem600.read(iprot); - struct.tblValidWriteIds.add(_elem600); + _elem608 = new TableValidWriteIds(); + _elem608.read(iprot); + struct.tblValidWriteIds.add(_elem608); } } struct.setTblValidWriteIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index fb2f4dc8da..c5bc23e356 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set634 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set634.size); - long _elem635; - for (int _i636 = 0; _i636 < _set634.size; ++_i636) + org.apache.thrift.protocol.TSet _set642 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set642.size); + long _elem643; + for (int _i644 = 0; _i644 < _set642.size; ++_i644) { - _elem635 = iprot.readI64(); - struct.aborted.add(_elem635); + _elem643 = iprot.readI64(); + struct.aborted.add(_elem643); } iprot.readSetEnd(); } @@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set637 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set637.size); - long _elem638; - for (int _i639 = 0; _i639 < _set637.size; ++_i639) + org.apache.thrift.protocol.TSet _set645 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set645.size); + long _elem646; + for (int _i647 = 0; _i647 < _set645.size; ++_i647) { - _elem638 = iprot.readI64(); - struct.nosuch.add(_elem638); + _elem646 = iprot.readI64(); + struct.nosuch.add(_elem646); } iprot.readSetEnd(); } @@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter640 : struct.aborted) + for (long _iter648 : struct.aborted) { - oprot.writeI64(_iter640); + oprot.writeI64(_iter648); } oprot.writeSetEnd(); } @@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter641 : struct.nosuch) + for (long _iter649 : struct.nosuch) { - oprot.writeI64(_iter641); + oprot.writeI64(_iter649); } oprot.writeSetEnd(); } @@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter642 : struct.aborted) + for (long _iter650 : struct.aborted) { - oprot.writeI64(_iter642); + oprot.writeI64(_iter650); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter643 : struct.nosuch) + for (long _iter651 : struct.nosuch) { - oprot.writeI64(_iter643); + oprot.writeI64(_iter651); } } } @@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set644 = new 
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set644.size); - long _elem645; - for (int _i646 = 0; _i646 < _set644.size; ++_i646) + org.apache.thrift.protocol.TSet _set652 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set652.size); + long _elem653; + for (int _i654 = 0; _i654 < _set652.size; ++_i654) { - _elem645 = iprot.readI64(); - struct.aborted.add(_elem645); + _elem653 = iprot.readI64(); + struct.aborted.add(_elem653); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set647 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set647.size); - long _elem648; - for (int _i649 = 0; _i649 < _set647.size; ++_i649) + org.apache.thrift.protocol.TSet _set655 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set655.size); + long _elem656; + for (int _i657 = 0; _i657 < _set655.size; ++_i657) { - _elem648 = iprot.readI64(); - struct.nosuch.add(_elem648); + _elem656 = iprot.readI64(); + struct.nosuch.add(_elem656); } } struct.setNosuchIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java index 6d13d602cb..c37ce58009 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField OBJECT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("objectName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField PART_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("partValues", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String objectName; // required private List partValues; // required private String columnName; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -66,7 +68,8 @@ DB_NAME((short)2, "dbName"), OBJECT_NAME((short)3, "objectName"), PART_VALUES((short)4, "partValues"), - COLUMN_NAME((short)5, "columnName"); + COLUMN_NAME((short)5, "columnName"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -91,6 +94,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALUES; case 5: // COLUMN_NAME return COLUMN_NAME; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -131,6 +136,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -145,6 +151,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveObjectRef.class, metaDataMap); } @@ -187,6 +195,9 @@ public HiveObjectRef(HiveObjectRef other) { if (other.isSetColumnName()) { this.columnName = other.columnName; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public HiveObjectRef deepCopy() { @@ -200,6 +211,7 @@ public void clear() { this.objectName = null; this.partValues = null; this.columnName = null; + this.catName = null; } /** @@ -340,6 +352,29 @@ public void setColumnNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case OBJECT_TYPE: @@ -382,6 +417,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -402,6 +445,9 @@ public Object getFieldValue(_Fields field) { case COLUMN_NAME: return getColumnName(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -423,6 +469,8 @@ public boolean isSet(_Fields field) { return isSetPartValues(); case COLUMN_NAME: return isSetColumnName(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -485,6 +533,15 @@ public boolean equals(HiveObjectRef that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || 
that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -517,6 +574,11 @@ public int hashCode() { if (present_columnName) list.add(columnName); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -578,6 +640,16 @@ public int compareTo(HiveObjectRef other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -637,6 +709,16 @@ public String toString() { sb.append(this.columnName); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -730,6 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HiveObjectRef struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -775,6 +865,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HiveObjectRef stru oprot.writeString(struct.columnName); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -808,7 +905,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struc if (struct.isSetColumnName()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetObjectType()) { oprot.writeI32(struct.objectType.getValue()); } @@ -830,12 +930,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struc if (struct.isSetColumnName()) { oprot.writeString(struct.columnName); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.objectType = org.apache.hadoop.hive.metastore.api.HiveObjectType.findByValue(iprot.readI32()); struct.setObjectTypeIsSet(true); @@ -865,6 +968,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct struct.columnName = iprot.readString(); struct.setColumnNameIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java index 92d8b52181..285f402579 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java @@ -40,12 +40,13 @@ private static final org.apache.thrift.protocol.TField SCHEMA_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaType", org.apache.thrift.protocol.TType.I32, (short)1); private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)5); - private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)7); - private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)6); + private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)8); + private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private SchemaType schemaType; // required private String name; // required + private String catName; // required private String dbName; // required private SchemaCompatibility compatibility; // required private SchemaValidation validationLevel; // required @@ -70,20 +72,21 @@ */ SCHEMA_TYPE((short)1, "schemaType"), 
NAME((short)2, "name"), - DB_NAME((short)3, "dbName"), + CAT_NAME((short)3, "catName"), + DB_NAME((short)4, "dbName"), /** * * @see SchemaCompatibility */ - COMPATIBILITY((short)4, "compatibility"), + COMPATIBILITY((short)5, "compatibility"), /** * * @see SchemaValidation */ - VALIDATION_LEVEL((short)5, "validationLevel"), - CAN_EVOLVE((short)6, "canEvolve"), - SCHEMA_GROUP((short)7, "schemaGroup"), - DESCRIPTION((short)8, "description"); + VALIDATION_LEVEL((short)6, "validationLevel"), + CAN_EVOLVE((short)7, "canEvolve"), + SCHEMA_GROUP((short)8, "schemaGroup"), + DESCRIPTION((short)9, "description"); private static final Map byName = new HashMap(); @@ -102,17 +105,19 @@ public static _Fields findByThriftId(int fieldId) { return SCHEMA_TYPE; case 2: // NAME return NAME; - case 3: // DB_NAME + case 3: // CAT_NAME + return CAT_NAME; + case 4: // DB_NAME return DB_NAME; - case 4: // COMPATIBILITY + case 5: // COMPATIBILITY return COMPATIBILITY; - case 5: // VALIDATION_LEVEL + case 6: // VALIDATION_LEVEL return VALIDATION_LEVEL; - case 6: // CAN_EVOLVE + case 7: // CAN_EVOLVE return CAN_EVOLVE; - case 7: // SCHEMA_GROUP + case 8: // SCHEMA_GROUP return SCHEMA_GROUP; - case 8: // DESCRIPTION + case 9: // DESCRIPTION return DESCRIPTION; default: return null; @@ -164,6 +169,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaType.class))); tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COMPATIBILITY, new org.apache.thrift.meta_data.FieldMetaData("compatibility", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -186,6 +193,7 @@ public ISchema() { public ISchema( SchemaType schemaType, String name, + String catName, String dbName, SchemaCompatibility compatibility, SchemaValidation validationLevel, @@ -194,6 +202,7 @@ public ISchema( this(); this.schemaType = schemaType; this.name = name; + this.catName = catName; this.dbName = dbName; this.compatibility = compatibility; this.validationLevel = validationLevel; @@ -212,6 +221,9 @@ public ISchema(ISchema other) { if (other.isSetName()) { this.name = other.name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -238,6 +250,7 @@ public ISchema deepCopy() { public void clear() { this.schemaType = null; this.name = null; + this.catName = null; this.dbName = null; this.compatibility = null; this.validationLevel = null; @@ -301,6 +314,29 @@ public void setNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void 
setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -472,6 +508,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -531,6 +575,9 @@ public Object getFieldValue(_Fields field) { case NAME: return getName(); + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -564,6 +611,8 @@ public boolean isSet(_Fields field) { return isSetSchemaType(); case NAME: return isSetName(); + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case COMPATIBILITY: @@ -611,6 +660,15 @@ public boolean equals(ISchema that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -682,6 +740,11 @@ public int hashCode() { if (present_name) list.add(name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -743,6 +806,16 @@ public int compareTo(ISchema other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -839,6 +912,14 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -947,7 +1028,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // DB_NAME + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -955,7 +1044,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // COMPATIBILITY + case 5: // COMPATIBILITY if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32()); struct.setCompatibilityIsSet(true); @@ 
-963,7 +1052,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // VALIDATION_LEVEL + case 6: // VALIDATION_LEVEL if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32()); struct.setValidationLevelIsSet(true); @@ -971,7 +1060,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // CAN_EVOLVE + case 7: // CAN_EVOLVE if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.canEvolve = iprot.readBool(); struct.setCanEvolveIsSet(true); @@ -979,7 +1068,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // SCHEMA_GROUP + case 8: // SCHEMA_GROUP if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.schemaGroup = iprot.readString(); struct.setSchemaGroupIsSet(true); @@ -987,7 +1076,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // DESCRIPTION + case 9: // DESCRIPTION if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.description = iprot.readString(); struct.setDescriptionIsSet(true); @@ -1018,6 +1107,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ISchema struct) th oprot.writeString(struct.name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -1074,31 +1168,37 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thr if (struct.isSetName()) { optionals.set(1); } - if (struct.isSetDbName()) { + if (struct.isSetCatName()) { optionals.set(2); } - if (struct.isSetCompatibility()) { + if (struct.isSetDbName()) { optionals.set(3); } - if (struct.isSetValidationLevel()) { + if (struct.isSetCompatibility()) { optionals.set(4); } - if (struct.isSetCanEvolve()) { + if (struct.isSetValidationLevel()) { optionals.set(5); } - if (struct.isSetSchemaGroup()) { + if (struct.isSetCanEvolve()) { optionals.set(6); } - if (struct.isSetDescription()) { + if (struct.isSetSchemaGroup()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetDescription()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetSchemaType()) { oprot.writeI32(struct.schemaType.getValue()); } if (struct.isSetName()) { oprot.writeString(struct.name); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -1122,7 +1222,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thr @Override public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32()); 
struct.setSchemaTypeIsSet(true); @@ -1132,26 +1232,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thro struct.setNameIsSet(true); } if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(3)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32()); struct.setCompatibilityIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32()); struct.setValidationLevelIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.canEvolve = iprot.readBool(); struct.setCanEvolveIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.schemaGroup = iprot.readString(); struct.setSchemaGroupIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.description = iprot.readString(); struct.setDescriptionIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java index ad2505198d..6f0e0525a8 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ISchemaName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ISchemaName"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new ISchemaNameTupleSchemeFactory()); } + private String catName; // required private String dbName; // required private String schemaName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
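Unlike HiveObjectRef, ISchema does not append catName at the end: it is spliced in as Thrift field 3, every later field (dbName through description) shifts up one ID, and the tuple-scheme bit set widens from 8 to 9. Renumbering field IDs is wire-incompatible with any already-serialized ISchema, which is presumably tolerable only because the schema-registry structs were new and had not yet shipped in a release. A sketch of populating the struct after the change (all values hypothetical; the SchemaType constant is assumed from the existing enum):

    ISchema schema = new ISchema();
    schema.setSchemaType(SchemaType.AVRO);
    schema.setName("orders_schema");
    schema.setCatName("hive");   // new, Thrift id 3
    schema.setDbName("sales");   // moved from id 3 to id 4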
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "dbName"), - SCHEMA_NAME((short)2, "schemaName"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "dbName"), + SCHEMA_NAME((short)3, "schemaName"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // SCHEMA_NAME + case 3: // SCHEMA_NAME return SCHEMA_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -127,10 +134,12 @@ public ISchemaName() { } public ISchemaName( + String catName, String dbName, String schemaName) { this(); + this.catName = catName; this.dbName = dbName; this.schemaName = schemaName; } @@ -139,6 +148,9 @@ public ISchemaName( * Performs a deep copy on other. */ public ISchemaName(ISchemaName other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -153,10 +165,34 @@ public ISchemaName deepCopy() { @Override public void clear() { + this.catName = null; this.dbName = null; this.schemaName = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -205,6 +241,14 @@ public void setSchemaNameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case SCHEMA_NAME: @@ -264,6 +313,15 @@ public boolean equals(ISchemaName that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if 
(!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -289,6 +347,11 @@ public boolean equals(ISchemaName that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -310,6 +373,16 @@ public int compareTo(ISchemaName other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("ISchemaName("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -408,7 +489,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchemaName struct) break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -416,7 +505,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchemaName struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // SCHEMA_NAME + case 3: // SCHEMA_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.schemaName = iprot.readString(); struct.setSchemaNameIsSet(true); @@ -437,6 +526,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ISchemaName struct struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -465,13 +559,19 @@ public ISchemaNameTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbName()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetSchemaName()) { + if (struct.isSetDbName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetSchemaName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbName()) { 
oprot.writeString(struct.dbName); } @@ -483,12 +583,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) @Override public void read(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.schemaName = iprot.readString(); struct.setSchemaNameIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java index d1cdb4b541..8a3361181b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java @@ -538,13 +538,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 2: // FILES_ADDED if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); - struct.filesAdded = new ArrayList(_list692.size); - String _elem693; - for (int _i694 = 0; _i694 < _list692.size; ++_i694) + org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); + struct.filesAdded = new ArrayList(_list700.size); + String _elem701; + for (int _i702 = 0; _i702 < _list700.size; ++_i702) { - _elem693 = iprot.readString(); - struct.filesAdded.add(_elem693); + _elem701 = iprot.readString(); + struct.filesAdded.add(_elem701); } iprot.readListEnd(); } @@ -556,13 +556,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD case 3: // FILES_ADDED_CHECKSUM if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list695 = iprot.readListBegin(); - struct.filesAddedChecksum = new ArrayList(_list695.size); - String _elem696; - for (int _i697 = 0; _i697 < _list695.size; ++_i697) + org.apache.thrift.protocol.TList _list703 = iprot.readListBegin(); + struct.filesAddedChecksum = new ArrayList(_list703.size); + String _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem696 = iprot.readString(); - struct.filesAddedChecksum.add(_elem696); + _elem704 = iprot.readString(); + struct.filesAddedChecksum.add(_elem704); } iprot.readListEnd(); } @@ -593,9 +593,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size())); - for (String _iter698 : struct.filesAdded) + for (String _iter706 : struct.filesAdded) { - oprot.writeString(_iter698); + oprot.writeString(_iter706); } oprot.writeListEnd(); } @@ -606,9 +606,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size())); - for (String _iter699 : struct.filesAddedChecksum) + 
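ISchemaName is the lookup key for schema objects, so the catalog now participates in a schema's fully qualified name: (catName, dbName, schemaName). As with ISchema, the existing fields were renumbered (dbName 1 to 2, schemaName 2 to 3) rather than appending catName, and the convenience constructor leads with the catalog accordingly (names hypothetical):

    ISchemaName key = new ISchemaName("hive", "sales", "orders_schema");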
for (String _iter707 : struct.filesAddedChecksum) { - oprot.writeString(_iter699); + oprot.writeString(_iter707); } oprot.writeListEnd(); } @@ -634,9 +634,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.filesAdded.size()); - for (String _iter700 : struct.filesAdded) + for (String _iter708 : struct.filesAdded) { - oprot.writeString(_iter700); + oprot.writeString(_iter708); } } BitSet optionals = new BitSet(); @@ -653,9 +653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD if (struct.isSetFilesAddedChecksum()) { { oprot.writeI32(struct.filesAddedChecksum.size()); - for (String _iter701 : struct.filesAddedChecksum) + for (String _iter709 : struct.filesAddedChecksum) { - oprot.writeString(_iter701); + oprot.writeString(_iter709); } } } @@ -665,13 +665,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list702 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAdded = new ArrayList(_list702.size); - String _elem703; - for (int _i704 = 0; _i704 < _list702.size; ++_i704) + org.apache.thrift.protocol.TList _list710 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAdded = new ArrayList(_list710.size); + String _elem711; + for (int _i712 = 0; _i712 < _list710.size; ++_i712) { - _elem703 = iprot.readString(); - struct.filesAdded.add(_elem703); + _elem711 = iprot.readString(); + struct.filesAdded.add(_elem711); } } struct.setFilesAddedIsSet(true); @@ -682,13 +682,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestDa } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.filesAddedChecksum = new ArrayList(_list705.size); - String _elem706; - for (int _i707 = 0; _i707 < _list705.size; ++_i707) + org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.filesAddedChecksum = new ArrayList(_list713.size); + String _elem714; + for (int _i715 = 0; _i715 < _list713.size; ++_i715) { - _elem706 = iprot.readString(); - struct.filesAddedChecksum.add(_elem706); + _elem714 = iprot.readString(); + struct.filesAddedChecksum.add(_elem714); } } struct.setFilesAddedChecksumIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index 722619f97b..6f03ea96c6 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); - struct.component = new ArrayList(_list618.size); - LockComponent 
_elem619; - for (int _i620 = 0; _i620 < _list618.size; ++_i620) + org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); + struct.component = new ArrayList(_list626.size); + LockComponent _elem627; + for (int _i628 = 0; _i628 < _list626.size; ++_i628) { - _elem619 = new LockComponent(); - _elem619.read(iprot); - struct.component.add(_elem619); + _elem627 = new LockComponent(); + _elem627.read(iprot); + struct.component.add(_elem627); } iprot.readListEnd(); } @@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter621 : struct.component) + for (LockComponent _iter629 : struct.component) { - _iter621.write(oprot); + _iter629.write(oprot); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter622 : struct.component) + for (LockComponent _iter630 : struct.component) { - _iter622.write(oprot); + _iter630.write(oprot); } } oprot.writeString(struct.user); @@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list623.size); - LockComponent _elem624; - for (int _i625 = 0; _i625 < _list623.size; ++_i625) + org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list631.size); + LockComponent _elem632; + for (int _i633 = 0; _i633 < _list631.size; ++_i633) { - _elem624 = new LockComponent(); - _elem624.read(iprot); - struct.component.add(_elem624); + _elem632 = new LockComponent(); + _elem632.read(iprot); + struct.component.add(_elem632); } } struct.setComponentIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java index fec35d50b7..faee4be82e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java @@ -518,13 +518,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization str case 1: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set808 = iprot.readSetBegin(); - struct.tablesUsed = new HashSet(2*_set808.size); - String _elem809; - for (int _i810 = 0; _i810 < _set808.size; ++_i810) + org.apache.thrift.protocol.TSet _set816 = iprot.readSetBegin(); + struct.tablesUsed = new HashSet(2*_set816.size); + String _elem817; + for (int _i818 = 0; _i818 < _set816.size; ++_i818) { - _elem809 = iprot.readString(); - struct.tablesUsed.add(_elem809); + _elem817 = iprot.readString(); + struct.tablesUsed.add(_elem817); } iprot.readSetEnd(); 
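The InsertEventRequestData, LockRequest, and Materialization hunks here, like the NotNullConstraintsResponse and NotificationEventResponse hunks further down, contain no behavioural change. The Thrift compiler numbers its list/set temporaries (_listNNN, _elemNNN, _iterNNN, _iNNN) with a single counter that runs across the whole generated file, so adding new fields and structs earlier in the IDL shifts every later temporary by a constant, here +8: _list692/_elem693/_i694 become _list700/_elem701/_i702, _list618 becomes _list626, _set808 becomes _set816, and so on. These hunks are pure regeneration churn and can be skimmed when reviewing.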
} @@ -566,9 +566,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization st oprot.writeFieldBegin(TABLES_USED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size())); - for (String _iter811 : struct.tablesUsed) + for (String _iter819 : struct.tablesUsed) { - oprot.writeString(_iter811); + oprot.writeString(_iter819); } oprot.writeSetEnd(); } @@ -603,9 +603,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tablesUsed.size()); - for (String _iter812 : struct.tablesUsed) + for (String _iter820 : struct.tablesUsed) { - oprot.writeString(_iter812); + oprot.writeString(_iter820); } } oprot.writeI64(struct.invalidationTime); @@ -623,13 +623,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Materialization str public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set813 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tablesUsed = new HashSet(2*_set813.size); - String _elem814; - for (int _i815 = 0; _i815 < _set813.size; ++_i815) + org.apache.thrift.protocol.TSet _set821 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tablesUsed = new HashSet(2*_set821.size); + String _elem822; + for (int _i823 = 0; _i823 < _set821.size; ++_i823) { - _elem814 = iprot.readString(); - struct.tablesUsed.add(_elem814); + _elem822 = iprot.readString(); + struct.tablesUsed.add(_elem822); } } struct.setTablesUsedIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java index da5d72b3ef..803dc206f3 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotNullConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotNullConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new NotNullConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public NotNullConstraintsRequest() { } public NotNullConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public NotNullConstraintsRequest( * Performs a deep copy on other. 
*/ public NotNullConstraintsRequest(NotNullConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public NotNullConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(NotNullConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(NotNullConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(NotNullConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("NotNullConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // 
check for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraints struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public NotNullConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR @Override public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java index a049468292..002ca13211 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR case 1: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list336 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list336.size); - SQLNotNullConstraint _elem337; - for (int _i338 = 0; _i338 < _list336.size; ++_i338) + org.apache.thrift.protocol.TList _list344 = iprot.readListBegin(); + struct.notNullConstraints = new 
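NotNullConstraintsRequest is stricter than the structs above: catName is added as a REQUIRED field 1 (db_name and tbl_name shift to 2 and 3), validate() now throws if it is unset, and the tuple scheme writes it unconditionally with no bit-set slot. Callers must therefore supply the catalog explicitly; any fallback for pre-catalog callers has to happen at a higher layer. A sketch (names hypothetical):

    NotNullConstraintsRequest req =
        new NotNullConstraintsRequest("hive", "sales", "orders");
    req.validate();   // would throw TProtocolException if catName were unset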
ArrayList(_list344.size); + SQLNotNullConstraint _elem345; + for (int _i346 = 0; _i346 < _list344.size; ++_i346) { - _elem337 = new SQLNotNullConstraint(); - _elem337.read(iprot); - struct.notNullConstraints.add(_elem337); + _elem345 = new SQLNotNullConstraint(); + _elem345.read(iprot); + struct.notNullConstraints.add(_elem345); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraints oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter339 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter347 : struct.notNullConstraints) { - _iter339.write(oprot); + _iter347.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter340 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter348 : struct.notNullConstraints) { - _iter340.write(oprot); + _iter348.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list341.size); - SQLNotNullConstraint _elem342; - for (int _i343 = 0; _i343 < _list341.size; ++_i343) + org.apache.thrift.protocol.TList _list349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list349.size); + SQLNotNullConstraint _elem350; + for (int _i351 = 0; _i351 < _list349.size; ++_i351) { - _elem342 = new SQLNotNullConstraint(); - _elem342.read(iprot); - struct.notNullConstraints.add(_elem342); + _elem350 = new SQLNotNullConstraint(); + _elem350.read(iprot); + struct.notNullConstraints.add(_elem350); } } struct.setNotNullConstraintsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java index 49ede82a13..e0e1cd4dc5 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java @@ -45,6 +45,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField MESSAGE_FORMAT_FIELD_DESC = new org.apache.thrift.protocol.TField("messageFormat", org.apache.thrift.protocol.TType.STRING, (short)7); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -59,6 +60,7 @@ private String tableName; // optional private String message; // required private String messageFormat; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -68,7 +70,8 @@ DB_NAME((short)4, "dbName"), TABLE_NAME((short)5, "tableName"), MESSAGE((short)6, "message"), - MESSAGE_FORMAT((short)7, "messageFormat"); + MESSAGE_FORMAT((short)7, "messageFormat"), + CAT_NAME((short)8, "catName"); private static final Map byName = new HashMap(); @@ -97,6 +100,8 @@ public static _Fields findByThriftId(int fieldId) { return MESSAGE; case 7: // MESSAGE_FORMAT return MESSAGE_FORMAT; + case 8: // CAT_NAME + return CAT_NAME; default: return null; } @@ -140,7 +145,7 @@ public String getFieldName() { private static final int __EVENTID_ISSET_ID = 0; private static final int __EVENTTIME_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.MESSAGE_FORMAT}; + private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.MESSAGE_FORMAT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -158,6 +163,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MESSAGE_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("messageFormat", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEvent.class, metaDataMap); } @@ -202,6 +209,9 @@ public NotificationEvent(NotificationEvent other) { if (other.isSetMessageFormat()) { this.messageFormat = other.messageFormat; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public NotificationEvent deepCopy() { @@ -219,6 +229,7 @@ public void clear() { this.tableName = null; this.message = null; this.messageFormat = null; + this.catName = null; } public long getEventId() { @@ -380,6 +391,29 @@ public void setMessageFormatIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case EVENT_ID: @@ -438,6 +472,14 @@ public void 
setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -464,6 +506,9 @@ public Object getFieldValue(_Fields field) { case MESSAGE_FORMAT: return getMessageFormat(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -489,6 +534,8 @@ public boolean isSet(_Fields field) { return isSetMessage(); case MESSAGE_FORMAT: return isSetMessageFormat(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -569,6 +616,15 @@ public boolean equals(NotificationEvent that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -611,6 +667,11 @@ public int hashCode() { if (present_messageFormat) list.add(messageFormat); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -692,6 +753,16 @@ public int compareTo(NotificationEvent other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -765,6 +836,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -882,6 +963,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEvent s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -932,6 +1021,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEvent oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -963,7 +1059,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEvent s if (struct.isSetMessageFormat()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -973,6 +1072,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEvent s if (struct.isSetMessageFormat()) { oprot.writeString(struct.messageFormat); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -986,7 +1088,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
NotificationEvent st struct.setEventTypeIsSet(true); struct.message = iprot.readString(); struct.setMessageIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -999,6 +1101,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEvent st struct.messageFormat = iprot.readString(); struct.setMessageFormatIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java index ff40ab592c..5045bdadda 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe case 1: // EVENTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); - struct.events = new ArrayList(_list684.size); - NotificationEvent _elem685; - for (int _i686 = 0; _i686 < _list684.size; ++_i686) + org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); + struct.events = new ArrayList(_list692.size); + NotificationEvent _elem693; + for (int _i694 = 0; _i694 < _list692.size; ++_i694) { - _elem685 = new NotificationEvent(); - _elem685.read(iprot); - struct.events.add(_elem685); + _elem693 = new NotificationEvent(); + _elem693.read(iprot); + struct.events.add(_elem693); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR oprot.writeFieldBegin(EVENTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size())); - for (NotificationEvent _iter687 : struct.events) + for (NotificationEvent _iter695 : struct.events) { - _iter687.write(oprot); + _iter695.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.events.size()); - for (NotificationEvent _iter688 : struct.events) + for (NotificationEvent _iter696 : struct.events) { - _iter688.write(oprot); + _iter696.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.events = new ArrayList(_list689.size); - NotificationEvent _elem690; - for (int _i691 = 0; _i691 < _list689.size; ++_i691) + org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.events = new ArrayList(_list697.size); + NotificationEvent _elem698; + for (int _i699 = 0; _i699 < _list697.size; ++_i699) { - _elem690 = new 
NotificationEvent(); - _elem690.read(iprot); - struct.events.add(_elem690); + _elem698 = new NotificationEvent(); + _elem698.read(iprot); + struct.events.add(_elem698); } } struct.setEventsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java index 4855575a9f..a4a5218f91 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField FROM_EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("fromEventId", org.apache.thrift.protocol.TType.I64, (short)1); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +50,13 @@ private long fromEventId; // required private String dbName; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { FROM_EVENT_ID((short)1, "fromEventId"), - DB_NAME((short)2, "dbName"); + DB_NAME((short)2, "dbName"), + CAT_NAME((short)3, "catName"); private static final Map byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return FROM_EVENT_ID; case 2: // DB_NAME return DB_NAME; + case 3: // CAT_NAME + return CAT_NAME; default: return null; } @@ -114,6 +119,7 @@ public String getFieldName() { // isset id assignments private static final int __FROMEVENTID_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -121,6 +127,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventsCountRequest.class, metaDataMap); } @@ -147,6 +155,9 @@ public NotificationEventsCountRequest(NotificationEventsCountRequest other) { if (other.isSetDbName()) { this.dbName = other.dbName; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public 
NotificationEventsCountRequest deepCopy() { @@ -158,6 +169,7 @@ public void clear() { setFromEventIdIsSet(false); this.fromEventId = 0; this.dbName = null; + this.catName = null; } public long getFromEventId() { @@ -205,6 +217,29 @@ public void setDbNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case FROM_EVENT_ID: @@ -223,6 +258,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -234,6 +277,9 @@ public Object getFieldValue(_Fields field) { case DB_NAME: return getDbName(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -249,6 +295,8 @@ public boolean isSet(_Fields field) { return isSetFromEventId(); case DB_NAME: return isSetDbName(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -284,6 +332,15 @@ public boolean equals(NotificationEventsCountRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -301,6 +358,11 @@ public int hashCode() { if (present_dbName) list.add(dbName); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -332,6 +394,16 @@ public int compareTo(NotificationEventsCountRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -363,6 +435,16 @@ public String toString() { sb.append(this.dbName); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -432,6 +514,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventsC org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -453,6 +543,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEvents oprot.writeString(struct.dbName); oprot.writeFieldEnd(); } + if (struct.catName != 
null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -472,6 +569,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventsC TTupleProtocol oprot = (TTupleProtocol) prot; oprot.writeI64(struct.fromEventId); oprot.writeString(struct.dbName); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -481,6 +586,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCo struct.setFromEventIdIsSet(true); struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index 8f08ed93d0..7adac3a800 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list562 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list562.size); - long _elem563; - for (int _i564 = 0; _i564 < _list562.size; ++_i564) + org.apache.thrift.protocol.TList _list570 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list570.size); + long _elem571; + for (int _i572 = 0; _i572 < _list570.size; ++_i572) { - _elem563 = iprot.readI64(); - struct.txn_ids.add(_elem563); + _elem571 = iprot.readI64(); + struct.txn_ids.add(_elem571); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter565 : struct.txn_ids) + for (long _iter573 : struct.txn_ids) { - oprot.writeI64(_iter565); + oprot.writeI64(_iter573); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter566 : struct.txn_ids) + for (long _iter574 : struct.txn_ids) { - oprot.writeI64(_iter566); + oprot.writeI64(_iter574); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list567.size); - long _elem568; - for (int _i569 = 0; _i569 < _list567.size; ++_i569) + org.apache.thrift.protocol.TList _list575 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list575.size); + long _elem576; + for (int _i577 = 0; _i577 < _list575.size; ++_i577) { - _elem568 = iprot.readI64(); - struct.txn_ids.add(_elem568); + _elem576 = iprot.readI64(); + struct.txn_ids.add(_elem576); } } struct.setTxn_idsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java index 3a13753647..c58e1cb7d9 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)6); private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private StorageDescriptor sd; // required private Map parameters; // required private PrincipalPrivilegeSet privileges; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ LAST_ACCESS_TIME((short)5, "lastAccessTime"), SD((short)6, "sd"), PARAMETERS((short)7, "parameters"), - PRIVILEGES((short)8, "privileges"); + PRIVILEGES((short)8, "privileges"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return PARAMETERS; case 8: // PRIVILEGES return PRIVILEGES; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -145,7 +150,7 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -168,6 +173,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap); } @@ -223,6 +230,9 @@ public Partition(Partition other) { if (other.isSetPrivileges()) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public Partition deepCopy() { @@ -241,6 +251,7 @@ public void clear() { this.sd = null; this.parameters = null; this.privileges = null; + this.catName = null; } public int getValuesSize() { @@ -451,6 +462,29 @@ public void setPrivilegesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case VALUES: @@ -517,6 +551,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -546,6 +588,9 @@ public Object getFieldValue(_Fields field) { case PRIVILEGES: return getPrivileges(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -573,6 +618,8 @@ public boolean isSet(_Fields field) { return isSetParameters(); case PRIVILEGES: return isSetPrivileges(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -662,6 
+709,15 @@ public boolean equals(Partition that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -709,6 +765,11 @@ public int hashCode() { if (present_privileges) list.add(privileges); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -800,6 +861,16 @@ public int compareTo(Partition other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -877,6 +948,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -931,13 +1012,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list208 = iprot.readListBegin(); - struct.values = new ArrayList(_list208.size); - String _elem209; - for (int _i210 = 0; _i210 < _list208.size; ++_i210) + org.apache.thrift.protocol.TList _list216 = iprot.readListBegin(); + struct.values = new ArrayList(_list216.size); + String _elem217; + for (int _i218 = 0; _i218 < _list216.size; ++_i218) { - _elem209 = iprot.readString(); - struct.values.add(_elem209); + _elem217 = iprot.readString(); + struct.values.add(_elem217); } iprot.readListEnd(); } @@ -990,15 +1071,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t case 7: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map211 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map211.size); - String _key212; - String _val213; - for (int _i214 = 0; _i214 < _map211.size; ++_i214) + org.apache.thrift.protocol.TMap _map219 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map219.size); + String _key220; + String _val221; + for (int _i222 = 0; _i222 < _map219.size; ++_i222) { - _key212 = iprot.readString(); - _val213 = iprot.readString(); - struct.parameters.put(_key212, _val213); + _key220 = iprot.readString(); + _val221 = iprot.readString(); + struct.parameters.put(_key220, _val221); } iprot.readMapEnd(); } @@ -1016,6 +1097,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1033,9 +1122,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition 
struct) oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (String _iter215 : struct.values) + for (String _iter223 : struct.values) { - oprot.writeString(_iter215); + oprot.writeString(_iter223); } oprot.writeListEnd(); } @@ -1066,10 +1155,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter216 : struct.parameters.entrySet()) + for (Map.Entry _iter224 : struct.parameters.entrySet()) { - oprot.writeString(_iter216.getKey()); - oprot.writeString(_iter216.getValue()); + oprot.writeString(_iter224.getKey()); + oprot.writeString(_iter224.getValue()); } oprot.writeMapEnd(); } @@ -1082,6 +1171,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1124,13 +1220,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetPrivileges()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); - for (String _iter217 : struct.values) + for (String _iter225 : struct.values) { - oprot.writeString(_iter217); + oprot.writeString(_iter225); } } } @@ -1152,31 +1251,34 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter218 : struct.parameters.entrySet()) + for (Map.Entry _iter226 : struct.parameters.entrySet()) { - oprot.writeString(_iter218.getKey()); - oprot.writeString(_iter218.getValue()); + oprot.writeString(_iter226.getKey()); + oprot.writeString(_iter226.getValue()); } } } if (struct.isSetPrivileges()) { struct.privileges.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list219 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list219.size); - String _elem220; - for (int _i221 = 0; _i221 < _list219.size; ++_i221) + org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.values = new ArrayList(_list227.size); + String _elem228; + for (int _i229 = 0; _i229 < _list227.size; ++_i229) { - _elem220 = iprot.readString(); - struct.values.add(_elem220); + _elem228 = iprot.readString(); + struct.values.add(_elem228); } } struct.setValuesIsSet(true); @@ -1204,15 +1306,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th } if (incoming.get(6)) { { - 
org.apache.thrift.protocol.TMap _map222 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map222.size); - String _key223; - String _val224; - for (int _i225 = 0; _i225 < _map222.size; ++_i225) + org.apache.thrift.protocol.TMap _map230 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map230.size); + String _key231; + String _val232; + for (int _i233 = 0; _i233 < _map230.size; ++_i233) { - _key223 = iprot.readString(); - _val224 = iprot.readString(); - struct.parameters.put(_key223, _val224); + _key231 = iprot.readString(); + _val232 = iprot.readString(); + struct.parameters.put(_key231, _val232); } } struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true); @@ -1222,6 +1324,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th struct.privileges.read(iprot); struct.setPrivilegesIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java index 186eb23a4e..1f32e38e62 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionListCompos case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list252 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list252.size); - Partition _elem253; - for (int _i254 = 0; _i254 < _list252.size; ++_i254) + org.apache.thrift.protocol.TList _list260 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list260.size); + Partition _elem261; + for (int _i262 = 0; _i262 < _list260.size; ++_i262) { - _elem253 = new Partition(); - _elem253.read(iprot); - struct.partitions.add(_elem253); + _elem261 = new Partition(); + _elem261.read(iprot); + struct.partitions.add(_elem261); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionListCompo oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter255 : struct.partitions) + for (Partition _iter263 : struct.partitions) { - _iter255.write(oprot); + _iter263.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionListCompos if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter256 : struct.partitions) + for (Partition _iter264 : struct.partitions) { - _iter256.write(oprot); + _iter264.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionListComposi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list257.size); - Partition _elem258; - for (int _i259 = 0; _i259 < _list257.size; ++_i259) + org.apache.thrift.protocol.TList _list265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list265.size); + Partition _elem266; + for (int _i267 = 0; _i267 < _list265.size; ++_i267) { - _elem258 = new Partition(); - _elem258.read(iprot); - struct.partitions.add(_elem258); + _elem266 = new Partition(); + _elem266.read(iprot); + struct.partitions.add(_elem266); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index fc91ce3a5d..247fdaa5ac 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField ROOT_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("rootPath", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String rootPath; // required private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional private PartitionListComposingSpec partitionList; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TABLE_NAME((short)2, "tableName"), ROOT_PATH((short)3, "rootPath"), SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"), - PARTITION_LIST((short)5, "partitionList"); + PARTITION_LIST((short)5, "partitionList"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return SHARED_SDPARTITION_SPEC; case 5: // PARTITION_LIST return PARTITION_LIST; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -127,7 +132,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST}; + private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -141,6 +146,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpecWithSharedSD.class))); tmpMap.put(_Fields.PARTITION_LIST, new org.apache.thrift.meta_data.FieldMetaData("partitionList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap); } @@ -178,6 +185,9 @@ public PartitionSpec(PartitionSpec other) { if (other.isSetPartitionList()) { this.partitionList = new PartitionListComposingSpec(other.partitionList); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionSpec deepCopy() { @@ -191,6 +201,7 @@ public void clear() { this.rootPath = null; this.sharedSDPartitionSpec = null; this.partitionList = null; + this.catName = null; } public String getDbName() { @@ -308,6 +319,29 @@ public void setPartitionListIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -350,6 +384,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -370,6 +412,9 @@ public Object getFieldValue(_Fields field) { case PARTITION_LIST: return getPartitionList(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -391,6 +436,8 @@ public boolean isSet(_Fields field) { return isSetSharedSDPartitionSpec(); case 
PARTITION_LIST: return isSetPartitionList(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -453,6 +500,15 @@ public boolean equals(PartitionSpec that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -485,6 +541,11 @@ public int hashCode() { if (present_partitionList) list.add(partitionList); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -546,6 +607,16 @@ public int compareTo(PartitionSpec other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -609,6 +680,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -700,6 +781,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -742,6 +831,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec stru oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -775,7 +871,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetPartitionList()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -791,12 +890,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetPartitionList()) { struct.partitionList.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -819,6 +921,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct struct.partitionList.read(iprot); struct.setPartitionListIsSet(true); } + if (incoming.get(5)) { + struct.catName = 
iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java index e7ab52afa2..a450cd4fc2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java @@ -434,14 +434,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpecWithSh case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list244 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list244.size); - PartitionWithoutSD _elem245; - for (int _i246 = 0; _i246 < _list244.size; ++_i246) + org.apache.thrift.protocol.TList _list252 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list252.size); + PartitionWithoutSD _elem253; + for (int _i254 = 0; _i254 < _list252.size; ++_i254) { - _elem245 = new PartitionWithoutSD(); - _elem245.read(iprot); - struct.partitions.add(_elem245); + _elem253 = new PartitionWithoutSD(); + _elem253.read(iprot); + struct.partitions.add(_elem253); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpecWithS oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (PartitionWithoutSD _iter247 : struct.partitions) + for (PartitionWithoutSD _iter255 : struct.partitions) { - _iter247.write(oprot); + _iter255.write(oprot); } oprot.writeListEnd(); } @@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSh if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (PartitionWithoutSD _iter248 : struct.partitions) + for (PartitionWithoutSD _iter256 : struct.partitions) { - _iter248.write(oprot); + _iter256.write(oprot); } } } @@ -534,14 +534,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSha BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list249 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list249.size); - PartitionWithoutSD _elem250; - for (int _i251 = 0; _i251 < _list249.size; ++_i251) + org.apache.thrift.protocol.TList _list257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list257.size); + PartitionWithoutSD _elem258; + for (int _i259 = 0; _i259 < _list257.size; ++_i259) { - _elem250 = new PartitionWithoutSD(); - _elem250.read(iprot); - struct.partitions.add(_elem250); + _elem258 = new PartitionWithoutSD(); + _elem258.read(iprot); + struct.partitions.add(_elem258); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index 9cac668860..c8707ca26d 100644 --- 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField PARTITION_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionOrder", org.apache.thrift.protocol.TType.LIST, (short)6); private static final org.apache.thrift.protocol.TField ASCENDING_FIELD_DESC = new org.apache.thrift.protocol.TField("ascending", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I64, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private List partitionOrder; // optional private boolean ascending; // optional private long maxParts; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ FILTER((short)5, "filter"), PARTITION_ORDER((short)6, "partitionOrder"), ASCENDING((short)7, "ascending"), - MAX_PARTS((short)8, "maxParts"); + MAX_PARTS((short)8, "maxParts"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return ASCENDING; case 8: // MAX_PARTS return MAX_PARTS; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -146,7 +151,7 @@ public String getFieldName() { private static final int __ASCENDING_ISSET_ID = 1; private static final int __MAXPARTS_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS}; + private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -168,6 +173,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRequest.class, metaDataMap); } @@ -223,6 +230,9 @@ public PartitionValuesRequest(PartitionValuesRequest other) { } this.ascending = other.ascending; this.maxParts = other.maxParts; + if 
(other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionValuesRequest deepCopy() { @@ -242,6 +252,7 @@ public void clear() { this.maxParts = -1L; + this.catName = null; } public String getDbName() { @@ -455,6 +466,29 @@ public void setMaxPartsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -521,6 +555,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -550,6 +592,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMaxParts(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -577,6 +622,8 @@ public boolean isSet(_Fields field) { return isSetAscending(); case MAX_PARTS: return isSetMaxParts(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -666,6 +713,15 @@ public boolean equals(PartitionValuesRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -713,6 +769,11 @@ public int hashCode() { if (present_maxParts) list.add(maxParts); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -804,6 +865,16 @@ public int compareTo(PartitionValuesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -885,6 +956,16 @@ public String toString() { sb.append(this.maxParts); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -961,14 +1042,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 3: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list506 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list506.size); - FieldSchema _elem507; - for (int _i508 = 0; _i508 < _list506.size; ++_i508) + org.apache.thrift.protocol.TList _list514 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list514.size); + FieldSchema _elem515; + for (int _i516 = 0; _i516 < _list514.size; ++_i516) { - _elem507 = new 
FieldSchema(); - _elem507.read(iprot); - struct.partitionKeys.add(_elem507); + _elem515 = new FieldSchema(); + _elem515.read(iprot); + struct.partitionKeys.add(_elem515); } iprot.readListEnd(); } @@ -996,14 +1077,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ case 6: // PARTITION_ORDER if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list509 = iprot.readListBegin(); - struct.partitionOrder = new ArrayList(_list509.size); - FieldSchema _elem510; - for (int _i511 = 0; _i511 < _list509.size; ++_i511) + org.apache.thrift.protocol.TList _list517 = iprot.readListBegin(); + struct.partitionOrder = new ArrayList(_list517.size); + FieldSchema _elem518; + for (int _i519 = 0; _i519 < _list517.size; ++_i519) { - _elem510 = new FieldSchema(); - _elem510.read(iprot); - struct.partitionOrder.add(_elem510); + _elem518 = new FieldSchema(); + _elem518.read(iprot); + struct.partitionOrder.add(_elem518); } iprot.readListEnd(); } @@ -1028,6 +1109,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1055,9 +1144,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter512 : struct.partitionKeys) + for (FieldSchema _iter520 : struct.partitionKeys) { - _iter512.write(oprot); + _iter520.write(oprot); } oprot.writeListEnd(); } @@ -1080,9 +1169,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeFieldBegin(PARTITION_ORDER_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionOrder.size())); - for (FieldSchema _iter513 : struct.partitionOrder) + for (FieldSchema _iter521 : struct.partitionOrder) { - _iter513.write(oprot); + _iter521.write(oprot); } oprot.writeListEnd(); } @@ -1099,6 +1188,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeI64(struct.maxParts); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1120,9 +1216,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter514 : struct.partitionKeys) + for (FieldSchema _iter522 : struct.partitionKeys) { - _iter514.write(oprot); + _iter522.write(oprot); } } BitSet optionals = new BitSet(); @@ -1141,7 +1237,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetMaxParts()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if 
(struct.isSetApplyDistinct()) { oprot.writeBool(struct.applyDistinct); } @@ -1151,9 +1250,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetPartitionOrder()) { { oprot.writeI32(struct.partitionOrder.size()); - for (FieldSchema _iter515 : struct.partitionOrder) + for (FieldSchema _iter523 : struct.partitionOrder) { - _iter515.write(oprot); + _iter523.write(oprot); } } } @@ -1163,6 +1262,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetMaxParts()) { oprot.writeI64(struct.maxParts); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1173,18 +1275,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list516 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list516.size); - FieldSchema _elem517; - for (int _i518 = 0; _i518 < _list516.size; ++_i518) + org.apache.thrift.protocol.TList _list524 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list524.size); + FieldSchema _elem525; + for (int _i526 = 0; _i526 < _list524.size; ++_i526) { - _elem517 = new FieldSchema(); - _elem517.read(iprot); - struct.partitionKeys.add(_elem517); + _elem525 = new FieldSchema(); + _elem525.read(iprot); + struct.partitionKeys.add(_elem525); } } struct.setPartitionKeysIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.applyDistinct = iprot.readBool(); struct.setApplyDistinctIsSet(true); @@ -1195,14 +1297,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list519 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionOrder = new ArrayList(_list519.size); - FieldSchema _elem520; - for (int _i521 = 0; _i521 < _list519.size; ++_i521) + org.apache.thrift.protocol.TList _list527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionOrder = new ArrayList(_list527.size); + FieldSchema _elem528; + for (int _i529 = 0; _i529 < _list527.size; ++_i529) { - _elem520 = new FieldSchema(); - _elem520.read(iprot); - struct.partitionOrder.add(_elem520); + _elem528 = new FieldSchema(); + _elem528.read(iprot); + struct.partitionOrder.add(_elem528); } } struct.setPartitionOrderIsSet(true); @@ -1215,6 +1317,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.maxParts = iprot.readI64(); struct.setMaxPartsIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java index 635b57e0e4..e336aa1218 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java @@ -354,14 +354,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesResp case 1: // PARTITION_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list530 = iprot.readListBegin(); - struct.partitionValues = new ArrayList(_list530.size); - PartitionValuesRow _elem531; - for (int _i532 = 0; _i532 < _list530.size; ++_i532) + org.apache.thrift.protocol.TList _list538 = iprot.readListBegin(); + struct.partitionValues = new ArrayList(_list538.size); + PartitionValuesRow _elem539; + for (int _i540 = 0; _i540 < _list538.size; ++_i540) { - _elem531 = new PartitionValuesRow(); - _elem531.read(iprot); - struct.partitionValues.add(_elem531); + _elem539 = new PartitionValuesRow(); + _elem539.read(iprot); + struct.partitionValues.add(_elem539); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRes oprot.writeFieldBegin(PARTITION_VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionValues.size())); - for (PartitionValuesRow _iter533 : struct.partitionValues) + for (PartitionValuesRow _iter541 : struct.partitionValues) { - _iter533.write(oprot); + _iter541.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitionValues.size()); - for (PartitionValuesRow _iter534 : struct.partitionValues) + for (PartitionValuesRow _iter542 : struct.partitionValues) { - _iter534.write(oprot); + _iter542.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResp public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionValues = new ArrayList(_list535.size); - PartitionValuesRow _elem536; - for (int _i537 = 0; _i537 < _list535.size; ++_i537) + org.apache.thrift.protocol.TList _list543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionValues = new ArrayList(_list543.size); + PartitionValuesRow _elem544; + for (int _i545 = 0; _i545 < _list543.size; ++_i545) { - _elem536 = new PartitionValuesRow(); - _elem536.read(iprot); - struct.partitionValues.add(_elem536); + _elem544 = new PartitionValuesRow(); + _elem544.read(iprot); + struct.partitionValues.add(_elem544); } } struct.setPartitionValuesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java index 83e9e06db9..082c6c2b43 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRow case 1: // ROW if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list522 = iprot.readListBegin(); - struct.row = new 
ArrayList(_list522.size); - String _elem523; - for (int _i524 = 0; _i524 < _list522.size; ++_i524) + org.apache.thrift.protocol.TList _list530 = iprot.readListBegin(); + struct.row = new ArrayList(_list530.size); + String _elem531; + for (int _i532 = 0; _i532 < _list530.size; ++_i532) { - _elem523 = iprot.readString(); - struct.row.add(_elem523); + _elem531 = iprot.readString(); + struct.row.add(_elem531); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRow oprot.writeFieldBegin(ROW_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.row.size())); - for (String _iter525 : struct.row) + for (String _iter533 : struct.row) { - oprot.writeString(_iter525); + oprot.writeString(_iter533); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.row.size()); - for (String _iter526 : struct.row) + for (String _iter534 : struct.row) { - oprot.writeString(_iter526); + oprot.writeString(_iter534); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.row = new ArrayList(_list527.size); - String _elem528; - for (int _i529 = 0; _i529 < _list527.size; ++_i529) + org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.row = new ArrayList(_list535.size); + String _elem536; + for (int _i537 = 0; _i537 < _list535.size; ++_i537) { - _elem528 = iprot.readString(); - struct.row.add(_elem528); + _elem536 = iprot.readString(); + struct.row.add(_elem536); } } struct.setRowIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java index ba8a7ca616..5807618b02 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java @@ -766,13 +766,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD case 1: // VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list226 = iprot.readListBegin(); - struct.values = new ArrayList(_list226.size); - String _elem227; - for (int _i228 = 0; _i228 < _list226.size; ++_i228) + org.apache.thrift.protocol.TList _list234 = iprot.readListBegin(); + struct.values = new ArrayList(_list234.size); + String _elem235; + for (int _i236 = 0; _i236 < _list234.size; ++_i236) { - _elem227 = iprot.readString(); - struct.values.add(_elem227); + _elem235 = iprot.readString(); + struct.values.add(_elem235); } iprot.readListEnd(); } @@ -808,15 +808,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD case 5: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - 
org.apache.thrift.protocol.TMap _map229 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map229.size); - String _key230; - String _val231; - for (int _i232 = 0; _i232 < _map229.size; ++_i232) + org.apache.thrift.protocol.TMap _map237 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map237.size); + String _key238; + String _val239; + for (int _i240 = 0; _i240 < _map237.size; ++_i240) { - _key230 = iprot.readString(); - _val231 = iprot.readString(); - struct.parameters.put(_key230, _val231); + _key238 = iprot.readString(); + _val239 = iprot.readString(); + struct.parameters.put(_key238, _val239); } iprot.readMapEnd(); } @@ -851,9 +851,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD oprot.writeFieldBegin(VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (String _iter233 : struct.values) + for (String _iter241 : struct.values) { - oprot.writeString(_iter233); + oprot.writeString(_iter241); } oprot.writeListEnd(); } @@ -874,10 +874,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter234 : struct.parameters.entrySet()) + for (Map.Entry _iter242 : struct.parameters.entrySet()) { - oprot.writeString(_iter234.getKey()); - oprot.writeString(_iter234.getValue()); + oprot.writeString(_iter242.getKey()); + oprot.writeString(_iter242.getValue()); } oprot.writeMapEnd(); } @@ -930,9 +930,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); - for (String _iter235 : struct.values) + for (String _iter243 : struct.values) { - oprot.writeString(_iter235); + oprot.writeString(_iter243); } } } @@ -948,10 +948,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter236 : struct.parameters.entrySet()) + for (Map.Entry _iter244 : struct.parameters.entrySet()) { - oprot.writeString(_iter236.getKey()); - oprot.writeString(_iter236.getValue()); + oprot.writeString(_iter244.getKey()); + oprot.writeString(_iter244.getValue()); } } } @@ -966,13 +966,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list237.size); - String _elem238; - for (int _i239 = 0; _i239 < _list237.size; ++_i239) + org.apache.thrift.protocol.TList _list245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.values = new ArrayList(_list245.size); + String _elem246; + for (int _i247 = 0; _i247 < _list245.size; ++_i247) { - _elem238 = iprot.readString(); - struct.values.add(_elem238); + _elem246 = iprot.readString(); + struct.values.add(_elem246); } } struct.setValuesIsSet(true); @@ -991,15 +991,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s } if (incoming.get(4)) { { - org.apache.thrift.protocol.TMap _map240 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map240.size); - String _key241; - String _val242; - for (int _i243 = 0; _i243 < _map240.size; ++_i243) + org.apache.thrift.protocol.TMap _map248 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map248.size); + String _key249; + String _val250; + for (int _i251 = 0; _i251 < _map248.size; ++_i251) { - _key241 = iprot.readString(); - _val242 = iprot.readString(); - struct.parameters.put(_key241, _val242); + _key249 = iprot.readString(); + _val250 = iprot.readString(); + struct.parameters.put(_key249, _val250); } } struct.setParametersIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 13a5d6a917..0e72625e01 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private ByteBuffer expr; // required private String defaultPartitionName; // optional private short maxParts; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TBL_NAME((short)2, "tblName"), EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), - MAX_PARTS((short)5, "maxParts"); + MAX_PARTS((short)5, "maxParts"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return DEFAULT_PARTITION_NAME; case 5: // MAX_PARTS return MAX_PARTS; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -143,6 +148,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -181,6 +188,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { this.defaultPartitionName = other.defaultPartitionName; } this.maxParts = other.maxParts; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionsByExprRequest deepCopy() { @@ -195,6 +205,7 @@ public void clear() { this.defaultPartitionName = null; this.maxParts = (short)-1; + this.catName = null; } public String getDbName() { @@ -320,6 +331,29 @@ public void setMaxPartsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -362,6 +396,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -382,6 +424,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMaxParts(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -403,6 +448,8 @@ public boolean isSet(_Fields field) { return 
isSetDefaultPartitionName(); case MAX_PARTS: return isSetMaxParts(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -465,6 +512,15 @@ public boolean equals(PartitionsByExprRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -497,6 +553,11 @@ public int hashCode() { if (present_maxParts) list.add(maxParts); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -558,6 +619,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -617,6 +688,16 @@ public String toString() { sb.append(this.maxParts); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -714,6 +795,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -754,6 +843,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeI16(struct.maxParts); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -781,13 +877,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetMaxParts()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } if (struct.isSetMaxParts()) { oprot.writeI16(struct.maxParts); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -799,7 +901,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -808,6 +910,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.maxParts = iprot.readI16(); 
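// Note on the two encodings touched in PartitionsByExprRequest (and in PartitionValuesRequest
// above, where readBitSet(5) became readBitSet(6)): under the standard scheme, catName travels
// as a tagged optional field (id 6 here) and readers skip unknown ids, so old and new peers
// interoperate; under the tuple scheme, optionals are positional behind a BitSet header, so
// every added optional widens the bitset and tuple-encoded data is only readable by code
// generated from the same IDL version. Minimal caller sketch (hypothetical variable names;
// "hive" is assumed here as the value of Warehouse.DEFAULT_CATALOG_NAME):
//
//   PartitionsByExprRequest req =
//       new PartitionsByExprRequest(dbName, tblName, ByteBuffer.wrap(exprBytes));
//   req.setCatName("hive");  // optional: pre-catalog clients simply leave it unset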
struct.setMaxPartsIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index b5c15397dd..3f2ddcc330 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -439,14 +439,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list408 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list408.size); - Partition _elem409; - for (int _i410 = 0; _i410 < _list408.size; ++_i410) + org.apache.thrift.protocol.TList _list416 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list416.size); + Partition _elem417; + for (int _i418 = 0; _i418 < _list416.size; ++_i418) { - _elem409 = new Partition(); - _elem409.read(iprot); - struct.partitions.add(_elem409); + _elem417 = new Partition(); + _elem417.read(iprot); + struct.partitions.add(_elem417); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter411 : struct.partitions) + for (Partition _iter419 : struct.partitions) { - _iter411.write(oprot); + _iter419.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter412 : struct.partitions) + for (Partition _iter420 : struct.partitions) { - _iter412.write(oprot); + _iter420.write(oprot); } } oprot.writeBool(struct.hasUnknownPartitions); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list413.size); - Partition _elem414; - for (int _i415 = 0; _i415 < _list413.size; ++_i415) + org.apache.thrift.protocol.TList _list421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list421.size); + Partition _elem422; + for (int _i423 = 0; _i423 < _list421.size; ++_i423) { - _elem414 = new Partition(); - _elem414.read(iprot); - struct.partitions.add(_elem414); + _elem422 = new Partition(); + _elem422.read(iprot); + struct.partitions.add(_elem422); } } struct.setPartitionsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java 
index ad6f05474c..91cf567e74 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String tblName; // required private List colNames; // required private List partNames; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), - PART_NAMES((short)4, "partNames"); + PART_NAMES((short)4, "partNames"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return COL_NAMES; case 4: // PART_NAMES return PART_NAMES; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,6 +127,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -135,6 +141,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } @@ -173,6 +181,9 @@ public PartitionsStatsRequest(PartitionsStatsRequest other) { List __this__partNames = new ArrayList(other.partNames); this.partNames = __this__partNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionsStatsRequest deepCopy() { @@ -185,6 +196,7 @@ public void clear() { this.tblName = null; this.colNames = null; this.partNames = null; + this.catName = null; } public String getDbName() {
+ public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -343,6 +378,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -360,6 +403,9 @@ public Object getFieldValue(_Fields field) { case PART_NAMES: return getPartNames(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -379,6 +425,8 @@ public boolean isSet(_Fields field) { return isSetColNames(); case PART_NAMES: return isSetPartNames(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -432,6 +480,15 @@ public boolean equals(PartitionsStatsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -459,6 +516,11 @@ public int hashCode() { if (present_partNames) list.add(partNames); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -510,6 +572,16 @@ public int compareTo(PartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -561,6 +633,16 @@ public String toString() { sb.append(this.partNames); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -639,13 +721,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list450 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list450.size); - String _elem451; - for (int _i452 = 0; _i452 < _list450.size; ++_i452) + org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list458.size); + String _elem459; + for (int _i460 = 0; _i460 < _list458.size; ++_i460) { - _elem451 = iprot.readString(); - struct.colNames.add(_elem451); + _elem459 = iprot.readString(); + struct.colNames.add(_elem459); } iprot.readListEnd(); } @@ -657,13 +739,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 4: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list453 = iprot.readListBegin(); - 
struct.partNames = new ArrayList(_list453.size); - String _elem454; - for (int _i455 = 0; _i455 < _list453.size; ++_i455) + org.apache.thrift.protocol.TList _list461 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list461.size); + String _elem462; + for (int _i463 = 0; _i463 < _list461.size; ++_i463) { - _elem454 = iprot.readString(); - struct.partNames.add(_elem454); + _elem462 = iprot.readString(); + struct.partNames.add(_elem462); } iprot.readListEnd(); } @@ -672,6 +754,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -699,9 +789,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter456 : struct.colNames) + for (String _iter464 : struct.colNames) { - oprot.writeString(_iter456); + oprot.writeString(_iter464); } oprot.writeListEnd(); } @@ -711,14 +801,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter457 : struct.partNames) + for (String _iter465 : struct.partNames) { - oprot.writeString(_iter457); + oprot.writeString(_iter465); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -740,18 +837,26 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter458 : struct.colNames) + for (String _iter466 : struct.colNames) { - oprot.writeString(_iter458); + oprot.writeString(_iter466); } } { oprot.writeI32(struct.partNames.size()); - for (String _iter459 : struct.partNames) + for (String _iter467 : struct.partNames) { - oprot.writeString(_iter459); + oprot.writeString(_iter467); } } + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -762,27 +867,32 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list460 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list460.size); - String _elem461; - for (int _i462 = 0; _i462 < _list460.size; ++_i462) + org.apache.thrift.protocol.TList _list468 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list468.size); + String _elem469; + for (int 
_i470 = 0; _i470 < _list468.size; ++_i470) { - _elem461 = iprot.readString(); - struct.colNames.add(_elem461); + _elem469 = iprot.readString(); + struct.colNames.add(_elem469); } } struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list463.size); - String _elem464; - for (int _i465 = 0; _i465 < _list463.size; ++_i465) + org.apache.thrift.protocol.TList _list471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list471.size); + String _elem472; + for (int _i473 = 0; _i473 < _list471.size; ++_i473) { - _elem464 = iprot.readString(); - struct.partNames.add(_elem464); + _elem472 = iprot.readString(); + struct.partNames.add(_elem472); } } struct.setPartNamesIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index d84af22da8..4caec8fa7e 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -363,26 +363,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map424 = iprot.readMapBegin(); - struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map424.size); - String _key425; - List _val426; - for (int _i427 = 0; _i427 < _map424.size; ++_i427) + org.apache.thrift.protocol.TMap _map432 = iprot.readMapBegin(); + struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map432.size); + String _key433; + List _val434; + for (int _i435 = 0; _i435 < _map432.size; ++_i435) { - _key425 = iprot.readString(); + _key433 = iprot.readString(); { - org.apache.thrift.protocol.TList _list428 = iprot.readListBegin(); - _val426 = new ArrayList(_list428.size); - ColumnStatisticsObj _elem429; - for (int _i430 = 0; _i430 < _list428.size; ++_i430) + org.apache.thrift.protocol.TList _list436 = iprot.readListBegin(); + _val434 = new ArrayList(_list436.size); + ColumnStatisticsObj _elem437; + for (int _i438 = 0; _i438 < _list436.size; ++_i438) { - _elem429 = new ColumnStatisticsObj(); - _elem429.read(iprot); - _val426.add(_elem429); + _elem437 = new ColumnStatisticsObj(); + _elem437.read(iprot); + _val434.add(_elem437); } iprot.readListEnd(); } - struct.partStats.put(_key425, _val426); + struct.partStats.put(_key433, _val434); } iprot.readMapEnd(); } @@ -408,14 +408,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry<String, List<ColumnStatisticsObj>> _iter431 : struct.partStats.entrySet()) + for (Map.Entry<String, List<ColumnStatisticsObj>> _iter439 : struct.partStats.entrySet()) { - oprot.writeString(_iter431.getKey()); + oprot.writeString(_iter439.getKey()); { - oprot.writeListBegin(new
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter431.getValue().size())); - for (ColumnStatisticsObj _iter432 : _iter431.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter439.getValue().size())); + for (ColumnStatisticsObj _iter440 : _iter439.getValue()) { - _iter432.write(oprot); + _iter440.write(oprot); } oprot.writeListEnd(); } @@ -443,14 +443,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry<String, List<ColumnStatisticsObj>> _iter433 : struct.partStats.entrySet()) + for (Map.Entry<String, List<ColumnStatisticsObj>> _iter441 : struct.partStats.entrySet()) { - oprot.writeString(_iter433.getKey()); + oprot.writeString(_iter441.getKey()); { - oprot.writeI32(_iter433.getValue().size()); - for (ColumnStatisticsObj _iter434 : _iter433.getValue()) + oprot.writeI32(_iter441.getValue().size()); + for (ColumnStatisticsObj _iter442 : _iter441.getValue()) { - _iter434.write(oprot); + _iter442.write(oprot); } } } @@ -461,25 +461,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map435 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map435.size); - String _key436; - List _val437; - for (int _i438 = 0; _i438 < _map435.size; ++_i438) + org.apache.thrift.protocol.TMap _map443 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map443.size); + String _key444; + List _val445; + for (int _i446 = 0; _i446 < _map443.size; ++_i446) { - _key436 = iprot.readString(); + _key444 = iprot.readString(); { - org.apache.thrift.protocol.TList _list439 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val437 = new ArrayList(_list439.size); - ColumnStatisticsObj _elem440; - for (int _i441 = 0; _i441 < _list439.size; ++_i441) + org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val445 = new ArrayList(_list447.size); + ColumnStatisticsObj _elem448; + for (int _i449 = 0; _i449 < _list447.size; ++_i449) { - _elem440 = new ColumnStatisticsObj(); - _elem440.read(iprot); - _val437.add(_elem440); + _elem448 = new ColumnStatisticsObj(); + _elem448.read(iprot); + _val445.add(_elem448); } } - struct.partStats.put(_key436, _val437); + struct.partStats.put(_key444, _val445); } } struct.setPartStatsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java index 8930f34e1e..591348da43 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name",
org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -49,11 +50,13 @@ private String db_name; // required private String tbl_name; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + TBL_NAME((short)2, "tbl_name"), + CAT_NAME((short)3, "catName"); private static final Map byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; + case 3: // CAT_NAME + return CAT_NAME; default: return null; } @@ -112,6 +117,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -119,6 +125,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrimaryKeysRequest.class, metaDataMap); } @@ -145,6 +153,9 @@ public PrimaryKeysRequest(PrimaryKeysRequest other) { if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PrimaryKeysRequest deepCopy() { @@ -155,6 +166,7 @@ public PrimaryKeysRequest deepCopy() { public void clear() { this.db_name = null; this.tbl_name = null; + this.catName = null; } public String getDb_name() { @@ -203,6 +215,29 @@ public void setTbl_nameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -221,6 +256,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value);
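// PrimaryKeysRequest, like PartitionsStatsRequest above, previously had no optional fields,
// so its tuple-scheme writer emitted only the required strings. catName is the first
// optional, which is why a one-bit BitSet header now trails the required fields on the
// wire: write the flag bits, then each optional that is set, and mirror that on read.
// The convention, restated as an illustrative fragment (this mirrors the generated code
// below rather than introducing any new API):
//
//   BitSet optionals = new BitSet();
//   if (struct.isSetCatName()) optionals.set(0);  // one bit per optional field
//   oprot.writeBitSet(optionals, 1);
//   if (struct.isSetCatName()) oprot.writeString(struct.catName);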
+ } + break; + } } @@ -232,6 +275,9 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -247,6 +293,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -282,6 +330,15 @@ public boolean equals(PrimaryKeysRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -299,6 +356,11 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -330,6 +392,16 @@ public int compareTo(PrimaryKeysRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -365,6 +437,16 @@ public String toString() { sb.append(this.tbl_name); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -432,6 +514,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysRequest org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -455,6 +545,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRequest oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -474,6 +571,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest TTupleProtocol oprot = (TTupleProtocol) prot; oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -483,6 +588,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest s struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java index 7eed56fd97..0b776fb4c9 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysResponse case 1: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list312 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list312.size); - SQLPrimaryKey _elem313; - for (int _i314 = 0; _i314 < _list312.size; ++_i314) + org.apache.thrift.protocol.TList _list320 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list320.size); + SQLPrimaryKey _elem321; + for (int _i322 = 0; _i322 < _list320.size; ++_i322) { - _elem313 = new SQLPrimaryKey(); - _elem313.read(iprot); - struct.primaryKeys.add(_elem313); + _elem321 = new SQLPrimaryKey(); + _elem321.read(iprot); + struct.primaryKeys.add(_elem321); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRespons oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter315 : struct.primaryKeys) + for (SQLPrimaryKey _iter323 : struct.primaryKeys) { - _iter315.write(oprot); + _iter323.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter316 : struct.primaryKeys) + for (SQLPrimaryKey _iter324 : struct.primaryKeys) { - _iter316.write(oprot); + _iter324.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list317.size); - SQLPrimaryKey _elem318; - for (int _i319 = 0; _i319 < _list317.size; ++_i319) + org.apache.thrift.protocol.TList _list325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list325.size); + SQLPrimaryKey _elem326; + for (int _i327 = 0; _i327 < _list325.size; ++_i327) { - _elem318 = new SQLPrimaryKey(); - _elem318.read(iprot); - struct.primaryKeys.add(_elem318); + _elem326 = new SQLPrimaryKey(); + _elem326.read(iprot); + struct.primaryKeys.add(_elem326); } } struct.setPrimaryKeysIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index 490d7185d6..e8cba59998 100644 --- 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list752 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list752.size); - long _elem753; - for (int _i754 = 0; _i754 < _list752.size; ++_i754) + org.apache.thrift.protocol.TList _list760 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list760.size); + long _elem761; + for (int _i762 = 0; _i762 < _list760.size; ++_i762) { - _elem753 = iprot.readI64(); - struct.fileIds.add(_elem753); + _elem761 = iprot.readI64(); + struct.fileIds.add(_elem761); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list755 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list755.size); - ByteBuffer _elem756; - for (int _i757 = 0; _i757 < _list755.size; ++_i757) + org.apache.thrift.protocol.TList _list763 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list763.size); + ByteBuffer _elem764; + for (int _i765 = 0; _i765 < _list763.size; ++_i765) { - _elem756 = iprot.readBinary(); - struct.metadata.add(_elem756); + _elem764 = iprot.readBinary(); + struct.metadata.add(_elem764); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter758 : struct.fileIds) + for (long _iter766 : struct.fileIds) { - oprot.writeI64(_iter758); + oprot.writeI64(_iter766); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter759 : struct.metadata) + for (ByteBuffer _iter767 : struct.metadata) { - oprot.writeBinary(_iter759); + oprot.writeBinary(_iter767); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter760 : struct.fileIds) + for (long _iter768 : struct.fileIds) { - oprot.writeI64(_iter760); + oprot.writeI64(_iter768); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter761 : struct.metadata) + for (ByteBuffer _iter769 : struct.metadata) { - oprot.writeBinary(_iter761); + oprot.writeBinary(_iter769); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list762 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, 
iprot.readI32()); - struct.fileIds = new ArrayList(_list762.size); - long _elem763; - for (int _i764 = 0; _i764 < _list762.size; ++_i764) + org.apache.thrift.protocol.TList _list770 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list770.size); + long _elem771; + for (int _i772 = 0; _i772 < _list770.size; ++_i772) { - _elem763 = iprot.readI64(); - struct.fileIds.add(_elem763); + _elem771 = iprot.readI64(); + struct.fileIds.add(_elem771); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list765 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list765.size); - ByteBuffer _elem766; - for (int _i767 = 0; _i767 < _list765.size; ++_i767) + org.apache.thrift.protocol.TList _list773 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list773.size); + ByteBuffer _elem774; + for (int _i775 = 0; _i775 < _list773.size; ++_i775) { - _elem766 = iprot.readBinary(); - struct.metadata.add(_elem766); + _elem774 = iprot.readBinary(); + struct.metadata.add(_elem774); } } struct.setMetadataIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index d9226836e2..bad44add62 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -168,13 +168,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list490 = iprot.readListBegin(); - names = new ArrayList(_list490.size); - String _elem491; - for (int _i492 = 0; _i492 < _list490.size; ++_i492) + org.apache.thrift.protocol.TList _list498 = iprot.readListBegin(); + names = new ArrayList(_list498.size); + String _elem499; + for (int _i500 = 0; _i500 < _list498.size; ++_i500) { - _elem491 = iprot.readString(); - names.add(_elem491); + _elem499 = iprot.readString(); + names.add(_elem499); } iprot.readListEnd(); } @@ -187,14 +187,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list493 = iprot.readListBegin(); - exprs = new ArrayList(_list493.size); - DropPartitionsExpr _elem494; - for (int _i495 = 0; _i495 < _list493.size; ++_i495) + org.apache.thrift.protocol.TList _list501 = iprot.readListBegin(); + exprs = new ArrayList(_list501.size); + DropPartitionsExpr _elem502; + for (int _i503 = 0; _i503 < _list501.size; ++_i503) { - _elem494 = new DropPartitionsExpr(); - _elem494.read(iprot); - exprs.add(_elem494); + _elem502 = new DropPartitionsExpr(); + _elem502.read(iprot); + exprs.add(_elem502); } iprot.readListEnd(); } @@ -219,9 +219,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter496 : names) + for (String _iter504 : names) { - oprot.writeString(_iter496); + oprot.writeString(_iter504); } 
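// The _listNNN/_elemNNN/_iNNN/_iterNNN/_mapNNN renames that dominate this and the
// surrounding generated classes (e.g. _iter496 -> _iter504) are not functional changes:
// the Thrift compiler numbers these temporaries from one running counter as it generates
// container (de)serialization code, so structs emitted earlier in the run (apparently the
// new catalog additions; the shift is a uniform +8 here) renumber every later temporary.
// Only the BitSet growth and the explicit catName blocks change the wire format.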
oprot.writeListEnd(); } @@ -230,9 +230,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter497 : exprs) + for (DropPartitionsExpr _iter505 : exprs) { - _iter497.write(oprot); + _iter505.write(oprot); } oprot.writeListEnd(); } @@ -250,13 +250,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list498 = iprot.readListBegin(); - names = new ArrayList(_list498.size); - String _elem499; - for (int _i500 = 0; _i500 < _list498.size; ++_i500) + org.apache.thrift.protocol.TList _list506 = iprot.readListBegin(); + names = new ArrayList(_list506.size); + String _elem507; + for (int _i508 = 0; _i508 < _list506.size; ++_i508) { - _elem499 = iprot.readString(); - names.add(_elem499); + _elem507 = iprot.readString(); + names.add(_elem507); } iprot.readListEnd(); } @@ -264,14 +264,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list501 = iprot.readListBegin(); - exprs = new ArrayList(_list501.size); - DropPartitionsExpr _elem502; - for (int _i503 = 0; _i503 < _list501.size; ++_i503) + org.apache.thrift.protocol.TList _list509 = iprot.readListBegin(); + exprs = new ArrayList(_list509.size); + DropPartitionsExpr _elem510; + for (int _i511 = 0; _i511 < _list509.size; ++_i511) { - _elem502 = new DropPartitionsExpr(); - _elem502.read(iprot); - exprs.add(_elem502); + _elem510 = new DropPartitionsExpr(); + _elem510.read(iprot); + exprs.add(_elem510); } iprot.readListEnd(); } @@ -291,9 +291,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter504 : names) + for (String _iter512 : names) { - oprot.writeString(_iter504); + oprot.writeString(_iter512); } oprot.writeListEnd(); } @@ -302,9 +302,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter505 : exprs) + for (DropPartitionsExpr _iter513 : exprs) { - _iter505.write(oprot); + _iter513.write(oprot); } oprot.writeListEnd(); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java index 24ce47da1e..9a5d3283fa 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java @@ -38,14 +38,15 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLCheckConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLCheckConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = 
new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField CHECK_EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("check_expression", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CHECK_EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("check_expression", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,6 +54,7 @@ schemes.put(TupleScheme.class, new SQLCheckConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -64,14 +66,15 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - CHECK_EXPRESSION((short)4, "check_expression"), - DC_NAME((short)5, "dc_name"), - ENABLE_CSTR((short)6, "enable_cstr"), - VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + CHECK_EXPRESSION((short)5, "check_expression"), + DC_NAME((short)6, "dc_name"), + ENABLE_CSTR((short)7, "enable_cstr"), + VALIDATE_CSTR((short)8, "validate_cstr"), + RELY_CSTR((short)9, "rely_cstr"); private static final Map byName = new HashMap(); @@ -86,21 +89,23 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // CHECK_EXPRESSION + case 5: // CHECK_EXPRESSION return CHECK_EXPRESSION; - case 5: // DC_NAME + case 6: // DC_NAME return DC_NAME; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR return ENABLE_CSTR; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR return VALIDATE_CSTR; - case 8: // RELY_CSTR + case 9: // RELY_CSTR return RELY_CSTR; default: return null; @@ -149,6 +154,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -173,6 +180,7 @@ public SQLCheckConstraint() { } public SQLCheckConstraint( + String catName, String table_db, String table_name, String column_name, @@ -183,6 +191,7 @@ public SQLCheckConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -201,6 +210,9 @@ public SQLCheckConstraint( */ public SQLCheckConstraint(SQLCheckConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -227,6 +239,7 @@ public SQLCheckConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -240,6 +253,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public 
boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -423,6 +459,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -492,6 +536,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -527,6 +574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -560,6 +609,15 @@ public boolean equals(SQLCheckConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -639,6 +697,11 @@ public boolean equals(SQLCheckConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -690,6 +753,16 @@ public int compareTo(SQLCheckConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -790,6 +863,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLCheckConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -886,7 +967,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -894,7 +983,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // 
TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -902,7 +991,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -910,7 +999,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // CHECK_EXPRESSION + case 5: // CHECK_EXPRESSION if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.check_expression = iprot.readString(); struct.setCheck_expressionIsSet(true); @@ -918,7 +1007,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // DC_NAME + case 6: // DC_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); @@ -926,7 +1015,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -934,7 +1023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -942,7 +1031,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // RELY_CSTR + case 9: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -963,6 +1052,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLCheckConstraint struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -1015,31 +1109,37 @@ public SQLCheckConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLCheckConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if (struct.isSetCheck_expression()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetDc_name()) { + if (struct.isSetCheck_expression()) { optionals.set(4); } - if 
(struct.isSetEnable_cstr()) { + if (struct.isSetDc_name()) { optionals.set(5); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(6); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetRely_cstr()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1069,36 +1169,40 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLCheckConstraint @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLCheckConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.check_expression = iprot.readString(); struct.setCheck_expressionIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java index 185b77ed21..7b8a257e8b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java @@ -38,14 +38,15 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLDefaultConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLDefaultConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,6 +54,7 @@ schemes.put(TupleScheme.class, new SQLDefaultConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -64,14 +66,15 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - DEFAULT_VALUE((short)4, "default_value"), - DC_NAME((short)5, "dc_name"), - ENABLE_CSTR((short)6, "enable_cstr"), - VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + DEFAULT_VALUE((short)5, "default_value"), + DC_NAME((short)6, "dc_name"), + ENABLE_CSTR((short)7, "enable_cstr"), + VALIDATE_CSTR((short)8, "validate_cstr"), + RELY_CSTR((short)9, "rely_cstr"); private static final Map byName = new HashMap(); @@ -86,21 +89,23 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // DEFAULT_VALUE + case 5: // DEFAULT_VALUE return DEFAULT_VALUE; - case 5: // DC_NAME + case 6: // DC_NAME return DC_NAME; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR return ENABLE_CSTR; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR return VALIDATE_CSTR; - case 8: // RELY_CSTR + case 9: // RELY_CSTR return RELY_CSTR; default: return null; @@ -149,6 +154,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -173,6 +180,7 @@ public SQLDefaultConstraint() { } public SQLDefaultConstraint( + String catName, String table_db, String table_name, String column_name, @@ -183,6 +191,7 @@ public SQLDefaultConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -201,6 +210,9 @@ public SQLDefaultConstraint( */ public SQLDefaultConstraint(SQLDefaultConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -227,6 +239,7 @@ public SQLDefaultConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -240,6 +253,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean 
isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -423,6 +459,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -492,6 +536,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -527,6 +574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -560,6 +609,15 @@ public boolean equals(SQLDefaultConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -639,6 +697,11 @@ public boolean equals(SQLDefaultConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -690,6 +753,16 @@ public int compareTo(SQLDefaultConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -790,6 +863,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLDefaultConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -886,7 +967,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -894,7 +983,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // 
TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -902,7 +991,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -910,7 +999,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // DEFAULT_VALUE + case 5: // DEFAULT_VALUE if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.default_value = iprot.readString(); struct.setDefault_valueIsSet(true); @@ -918,7 +1007,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // DC_NAME + case 6: // DC_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); @@ -926,7 +1015,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -934,7 +1023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -942,7 +1031,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // RELY_CSTR + case 9: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -963,6 +1052,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLDefaultConstrai struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -1015,31 +1109,37 @@ public SQLDefaultConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if (struct.isSetDefault_value()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetDc_name()) { + if (struct.isSetDefault_value()) { optionals.set(4); } - if 
(struct.isSetEnable_cstr()) { + if (struct.isSetDc_name()) { optionals.set(5); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(6); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetRely_cstr()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1069,36 +1169,40 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstrain @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.default_value = iprot.readString(); struct.setDefault_valueIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java index 6cf6f311e5..a7cf241772 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java @@ -52,6 +52,7 @@ private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)12); private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)13); private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)14); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)15); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -73,6 +74,7 @@ private boolean enable_cstr; // required private boolean validate_cstr; // required private boolean rely_cstr; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding 
and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -89,7 +91,8 @@ PK_NAME((short)11, "pk_name"), ENABLE_CSTR((short)12, "enable_cstr"), VALIDATE_CSTR((short)13, "validate_cstr"), - RELY_CSTR((short)14, "rely_cstr"); + RELY_CSTR((short)14, "rely_cstr"), + CAT_NAME((short)15, "catName"); private static final Map byName = new HashMap(); @@ -132,6 +135,8 @@ public static _Fields findByThriftId(int fieldId) { return VALIDATE_CSTR; case 14: // RELY_CSTR return RELY_CSTR; + case 15: // CAT_NAME + return CAT_NAME; default: return null; } @@ -179,6 +184,7 @@ public String getFieldName() { private static final int __VALIDATE_CSTR_ISSET_ID = 4; private static final int __RELY_CSTR_ISSET_ID = 5; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -210,6 +216,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLForeignKey.class, metaDataMap); } @@ -291,6 +299,9 @@ public SQLForeignKey(SQLForeignKey other) { this.enable_cstr = other.enable_cstr; this.validate_cstr = other.validate_cstr; this.rely_cstr = other.rely_cstr; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public SQLForeignKey deepCopy() { @@ -319,6 +330,7 @@ public void clear() { this.validate_cstr = false; setRely_cstrIsSet(false); this.rely_cstr = false; + this.catName = null; } public String getPktable_db() { @@ -637,6 +649,29 @@ public void setRely_cstrIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PKTABLE_DB: @@ -751,6 +786,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -798,6 +841,9 @@ public Object getFieldValue(_Fields field) { case RELY_CSTR: return isRely_cstr(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -837,6 +883,8 @@ public boolean isSet(_Fields field) { return isSetValidate_cstr(); case RELY_CSTR: return isSetRely_cstr(); + case CAT_NAME: + return 
isSetCatName(); } throw new IllegalStateException(); } @@ -980,6 +1028,15 @@ public boolean equals(SQLForeignKey that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1057,6 +1114,11 @@ public int hashCode() { if (present_rely_cstr) list.add(rely_cstr); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1208,6 +1270,16 @@ public int compareTo(SQLForeignKey other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1315,6 +1387,16 @@ public String toString() { sb.append("rely_cstr:"); sb.append(this.rely_cstr); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1472,6 +1554,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLForeignKey struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1543,6 +1633,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLForeignKey stru oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC); oprot.writeBool(struct.rely_cstr); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1603,7 +1700,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struc if (struct.isSetRely_cstr()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetCatName()) { + optionals.set(14); + } + oprot.writeBitSet(optionals, 15); if (struct.isSetPktable_db()) { oprot.writeString(struct.pktable_db); } @@ -1646,12 +1746,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struc if (struct.isSetRely_cstr()) { oprot.writeBool(struct.rely_cstr); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(15); if (incoming.get(0)) { struct.pktable_db = iprot.readString(); struct.setPktable_dbIsSet(true); @@ -1708,6 +1811,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct struct.rely_cstr = iprot.readBool(); 
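SQLForeignKey takes the compatible route: catName is appended as a new optional field 15 (note the OPTIONAL requirement type, the optionals[] array, and the standard-scheme write guarded by both the null check and isSetCatName()), and the tuple scheme widens its presence BitSet from 14 to 15 bits. Old readers skip the unknown field; old writers simply never set bit 14. A minimal sketch of the write-side presence-bit idiom; the class and method names are illustrative only:

import java.util.BitSet;

class TupleOptionalsSketch {
  // Mirrors the generated tuple scheme: one presence bit per settable field,
  // written ahead of the field payloads via oprot.writeBitSet(optionals, 15).
  static BitSet presenceBits(boolean hasCatName) {
    BitSet optionals = new BitSet();
    if (hasCatName) {
      optionals.set(14); // catName's bit, new in this change
    }
    return optionals;
  }
}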
struct.setRely_cstrIsSet(true); } + if (incoming.get(14)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java index cb0f2952b4..97b9c1fd3b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java @@ -38,13 +38,14 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLNotNullConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLNotNullConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField NN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nn_name", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)5); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField NN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nn_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", 
org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -52,6 +53,7 @@ schemes.put(TupleScheme.class, new SQLNotNullConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -62,13 +64,14 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - NN_NAME((short)4, "nn_name"), - ENABLE_CSTR((short)5, "enable_cstr"), - VALIDATE_CSTR((short)6, "validate_cstr"), - RELY_CSTR((short)7, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + NN_NAME((short)5, "nn_name"), + ENABLE_CSTR((short)6, "enable_cstr"), + VALIDATE_CSTR((short)7, "validate_cstr"), + RELY_CSTR((short)8, "rely_cstr"); private static final Map byName = new HashMap(); @@ -83,19 +86,21 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // NN_NAME + case 5: // NN_NAME return NN_NAME; - case 5: // ENABLE_CSTR + case 6: // ENABLE_CSTR return ENABLE_CSTR; - case 6: // VALIDATE_CSTR + case 7: // VALIDATE_CSTR return VALIDATE_CSTR; - case 7: // RELY_CSTR + case 8: // RELY_CSTR return RELY_CSTR; default: return null; @@ -144,6 +149,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -166,6 +173,7 @@ public SQLNotNullConstraint() { } public SQLNotNullConstraint( + String catName, String table_db, String table_name, String column_name, @@ -175,6 +183,7 @@ public SQLNotNullConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -192,6 +201,9 @@ public SQLNotNullConstraint( */ public SQLNotNullConstraint(SQLNotNullConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -215,6 +227,7 @@ 
public SQLNotNullConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -227,6 +240,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -387,6 +423,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -448,6 +492,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -480,6 +527,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -511,6 +560,15 @@ public boolean equals(SQLNotNullConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -581,6 +639,11 @@ public boolean equals(SQLNotNullConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -627,6 +690,16 @@ public int compareTo(SQLNotNullConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -717,6 +790,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLNotNullConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -805,7 +886,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -813,7 +902,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -821,7 +910,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -829,7 +918,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // NN_NAME + case 5: // NN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.nn_name = iprot.readString(); struct.setNn_nameIsSet(true); @@ -837,7 +926,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // ENABLE_CSTR + case 6: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -845,7 +934,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // VALIDATE_CSTR + case 7: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -853,7 +942,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // RELY_CSTR + case 8: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -874,6 +963,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLNotNullConstrai struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -921,28 +1015,34 @@ public SQLNotNullConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if 
(struct.isSetNn_name()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetEnable_cstr()) { + if (struct.isSetNn_name()) { optionals.set(4); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(5); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(6); } - oprot.writeBitSet(optionals, 7); + if (struct.isSetRely_cstr()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -969,32 +1069,36 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstrain @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(7); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.nn_name = iprot.readString(); struct.setNn_nameIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java index 45484a2acb..b77316f941 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private boolean enable_cstr; // required private boolean validate_cstr; // required private boolean rely_cstr; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
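Unlike SQLNotNullConstraint and SQLUniqueConstraint, SQLPrimaryKey adds catName
in the backward-compatible way: it is appended as optional field 9 (CAT_NAME at
(short)9, carried in the generated optionals array below), so the existing
field ids 1 through 8 keep their wire meaning.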
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ PK_NAME((short)5, "pk_name"), ENABLE_CSTR((short)6, "enable_cstr"), VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + RELY_CSTR((short)8, "rely_cstr"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return VALIDATE_CSTR; case 8: // RELY_CSTR return RELY_CSTR; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -147,6 +152,7 @@ public String getFieldName() { private static final int __VALIDATE_CSTR_ISSET_ID = 2; private static final int __RELY_CSTR_ISSET_ID = 3; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -166,6 +172,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLPrimaryKey.class, metaDataMap); } @@ -219,6 +227,9 @@ public SQLPrimaryKey(SQLPrimaryKey other) { this.enable_cstr = other.enable_cstr; this.validate_cstr = other.validate_cstr; this.rely_cstr = other.rely_cstr; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public SQLPrimaryKey deepCopy() { @@ -239,6 +250,7 @@ public void clear() { this.validate_cstr = false; setRely_cstrIsSet(false); this.rely_cstr = false; + this.catName = null; } public String getTable_db() { @@ -421,6 +433,29 @@ public void setRely_cstrIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_DB: @@ -487,6 +522,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -516,6 +559,9 @@ public Object getFieldValue(_Fields field) { case RELY_CSTR: return isRely_cstr(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -543,6 +589,8 @@ public boolean isSet(_Fields field) { return isSetValidate_cstr(); case RELY_CSTR: return isSetRely_cstr(); + case CAT_NAME: + return isSetCatName(); } throw new 
IllegalStateException(); } @@ -632,6 +680,15 @@ public boolean equals(SQLPrimaryKey that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -679,6 +736,11 @@ public int hashCode() { if (present_rely_cstr) list.add(rely_cstr); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -770,6 +832,16 @@ public int compareTo(SQLPrimaryKey other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -837,6 +909,16 @@ public String toString() { sb.append("rely_cstr:"); sb.append(this.rely_cstr); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -946,6 +1028,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLPrimaryKey struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -991,6 +1081,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLPrimaryKey stru oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC); oprot.writeBool(struct.rely_cstr); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1033,7 +1130,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struc if (struct.isSetRely_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1058,12 +1158,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struc if (struct.isSetRely_cstr()) { oprot.writeBool(struct.rely_cstr); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -1096,6 +1199,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } + if (incoming.get(8)) 
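/* Tuple-protocol note: writer and reader now use a nine-bit optionals BitSet
 * (was eight). The tuple scheme carries no field ids, so it is only safe when
 * client and server are regenerated from the same IDL; the standard scheme
 * above remains readable by old clients because catName is a new, optional
 * trailing field here. */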
{ + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java index 493fded8e8..a25a91cab4 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java @@ -38,14 +38,15 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLUniqueConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLUniqueConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField UK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("uk_name", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField UK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("uk_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final 
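/* SQLUniqueConstraint follows the same renumbering pattern as
 * SQLNotNullConstraint above: catName takes field id 1 and every existing field
 * shifts up by one (through RELY_CSTR, 8 -> 9), with the same
 * wire-compatibility caveat. */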
org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,6 +54,7 @@ schemes.put(TupleScheme.class, new SQLUniqueConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -64,14 +66,15 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - KEY_SEQ((short)4, "key_seq"), - UK_NAME((short)5, "uk_name"), - ENABLE_CSTR((short)6, "enable_cstr"), - VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + KEY_SEQ((short)5, "key_seq"), + UK_NAME((short)6, "uk_name"), + ENABLE_CSTR((short)7, "enable_cstr"), + VALIDATE_CSTR((short)8, "validate_cstr"), + RELY_CSTR((short)9, "rely_cstr"); private static final Map byName = new HashMap(); @@ -86,21 +89,23 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // KEY_SEQ + case 5: // KEY_SEQ return KEY_SEQ; - case 5: // UK_NAME + case 6: // UK_NAME return UK_NAME; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR return ENABLE_CSTR; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR return VALIDATE_CSTR; - case 8: // RELY_CSTR + case 9: // RELY_CSTR return RELY_CSTR; default: return null; @@ -150,6 +155,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -174,6 +181,7 @@ public SQLUniqueConstraint() { } public SQLUniqueConstraint( + String catName, String table_db, String table_name, String column_name, @@ -184,6 +192,7 @@ public SQLUniqueConstraint( boolean rely_cstr) { 
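/* The all-args constructor below now takes the catalog name first, so existing
 * callers must be updated. Illustrative usage only, assuming the default
 * catalog is named "hive" (per Warehouse.DEFAULT_CATALOG_NAME) and hypothetical
 * table/column names:
 *
 *   SQLUniqueConstraint uk = new SQLUniqueConstraint(
 *       "hive", "default", "my_table", "my_col", 1, "uk_1",
 *       true,    // enable_cstr
 *       false,   // validate_cstr
 *       false);  // rely_cstr
 */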
this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -203,6 +212,9 @@ public SQLUniqueConstraint( */ public SQLUniqueConstraint(SQLUniqueConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -227,6 +239,7 @@ public SQLUniqueConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -241,6 +254,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -423,6 +459,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -492,6 +536,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -527,6 +574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -560,6 +609,15 @@ public boolean equals(SQLUniqueConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -639,6 +697,11 @@ public boolean equals(SQLUniqueConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -690,6 +753,16 @@ public int compareTo(SQLUniqueConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -790,6 +863,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLUniqueConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + 
sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -882,7 +963,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -890,7 +979,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -898,7 +987,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -906,7 +995,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // KEY_SEQ + case 5: // KEY_SEQ if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.key_seq = iprot.readI32(); struct.setKey_seqIsSet(true); @@ -914,7 +1003,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // UK_NAME + case 6: // UK_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.uk_name = iprot.readString(); struct.setUk_nameIsSet(true); @@ -922,7 +1011,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -930,7 +1019,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -938,7 +1027,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // RELY_CSTR + case 9: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -959,6 +1048,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLUniqueConstrain struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + 
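/* A single null check suffices in this writer because catName is a required
 * (DEFAULT requirement) field on SQLUniqueConstraint; contrast SQLPrimaryKey
 * above, where the generated writer also checks isSetCatName() since that
 * field is OPTIONAL. */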
oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -1009,31 +1103,37 @@ public SQLUniqueConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if (struct.isSetKey_seq()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetUk_name()) { + if (struct.isSetKey_seq()) { optionals.set(4); } - if (struct.isSetEnable_cstr()) { + if (struct.isSetUk_name()) { optionals.set(5); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(6); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetRely_cstr()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1063,36 +1163,40 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.key_seq = iprot.readI32(); struct.setKey_seqIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.uk_name = iprot.readString(); struct.setUk_nameIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java index 3f39bd5eee..d49f1c41a7 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java @@ -445,14 +445,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 1: // FIELD_SCHEMAS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list284 = iprot.readListBegin(); - struct.fieldSchemas = new ArrayList(_list284.size); - FieldSchema _elem285; - for (int _i286 = 0; _i286 < _list284.size; ++_i286) + org.apache.thrift.protocol.TList _list292 = iprot.readListBegin(); + struct.fieldSchemas = new ArrayList(_list292.size); + FieldSchema _elem293; + for (int _i294 = 0; _i294 < _list292.size; ++_i294) { - _elem285 = new FieldSchema(); - _elem285.read(iprot); - struct.fieldSchemas.add(_elem285); + _elem293 = new FieldSchema(); + _elem293.read(iprot); + struct.fieldSchemas.add(_elem293); } iprot.readListEnd(); } @@ -464,15 +464,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 2: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map287 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map287.size); - String _key288; - String _val289; - for (int _i290 = 0; _i290 < _map287.size; ++_i290) + org.apache.thrift.protocol.TMap _map295 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map295.size); + String _key296; + String _val297; + for (int _i298 = 0; _i298 < _map295.size; ++_i298) { - _key288 = iprot.readString(); - _val289 = iprot.readString(); - struct.properties.put(_key288, _val289); + _key296 = iprot.readString(); + _val297 = iprot.readString(); + struct.properties.put(_key296, _val297); } iprot.readMapEnd(); } @@ -498,9 +498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fieldSchemas.size())); - for (FieldSchema _iter291 : struct.fieldSchemas) + for (FieldSchema _iter299 : struct.fieldSchemas) { - _iter291.write(oprot); + _iter299.write(oprot); } oprot.writeListEnd(); } @@ -510,10 +510,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter292 : struct.properties.entrySet()) + for (Map.Entry _iter300 : struct.properties.entrySet()) { - oprot.writeString(_iter292.getKey()); - oprot.writeString(_iter292.getValue()); + oprot.writeString(_iter300.getKey()); + oprot.writeString(_iter300.getValue()); } oprot.writeMapEnd(); } @@ -547,19 +547,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Schema struct) thro if (struct.isSetFieldSchemas()) { { oprot.writeI32(struct.fieldSchemas.size()); - for (FieldSchema _iter293 : struct.fieldSchemas) + for (FieldSchema _iter301 : struct.fieldSchemas) { - _iter293.write(oprot); + _iter301.write(oprot); } } } if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter294 : struct.properties.entrySet()) + for (Map.Entry _iter302 : struct.properties.entrySet()) { - oprot.writeString(_iter294.getKey()); - oprot.writeString(_iter294.getValue()); + oprot.writeString(_iter302.getKey()); + oprot.writeString(_iter302.getValue()); } } } @@ -571,29 +571,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Schema struct) throw BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list295 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
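/* From here on, the hunks in Schema.java and the files that follow
 * (SchemaVersion, SerDeInfo, SetPartitionsStatsRequest, ShowCompactResponse,
 * ShowLocksResponse, SkewedInfo, StorageDescriptor) contain no semantic change:
 * the Thrift compiler numbers its temporary variables (_list284, _elem285,
 * _i286, ...) sequentially across the generated sources, so the new
 * catalog-related fields apparently shift every later counter by eight
 * (_list284 -> _list292, _map104 -> _map112, and so on). Only the generated
 * names differ; the read/write logic is identical. */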
iprot.readI32()); - struct.fieldSchemas = new ArrayList(_list295.size); - FieldSchema _elem296; - for (int _i297 = 0; _i297 < _list295.size; ++_i297) + org.apache.thrift.protocol.TList _list303 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fieldSchemas = new ArrayList(_list303.size); + FieldSchema _elem304; + for (int _i305 = 0; _i305 < _list303.size; ++_i305) { - _elem296 = new FieldSchema(); - _elem296.read(iprot); - struct.fieldSchemas.add(_elem296); + _elem304 = new FieldSchema(); + _elem304.read(iprot); + struct.fieldSchemas.add(_elem304); } } struct.setFieldSchemasIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map298 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map298.size); - String _key299; - String _val300; - for (int _i301 = 0; _i301 < _map298.size; ++_i301) + org.apache.thrift.protocol.TMap _map306 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map306.size); + String _key307; + String _val308; + for (int _i309 = 0; _i309 < _map306.size; ++_i309) { - _key299 = iprot.readString(); - _val300 = iprot.readString(); - struct.properties.put(_key299, _val300); + _key307 = iprot.readString(); + _val308 = iprot.readString(); + struct.properties.put(_key307, _val308); } } struct.setPropertiesIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java index 50efdbdf30..da919d7305 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java @@ -1119,14 +1119,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struc case 4: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.cols = new ArrayList(_list880.size); - FieldSchema _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.cols = new ArrayList(_list888.size); + FieldSchema _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = new FieldSchema(); - _elem881.read(iprot); - struct.cols.add(_elem881); + _elem889 = new FieldSchema(); + _elem889.read(iprot); + struct.cols.add(_elem889); } iprot.readListEnd(); } @@ -1212,9 +1212,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion stru oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter883 : struct.cols) + for (FieldSchema _iter891 : struct.cols) { - _iter883.write(oprot); + _iter891.write(oprot); } oprot.writeListEnd(); } @@ -1323,9 +1323,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struc if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter884 : struct.cols) + for (FieldSchema _iter892 : struct.cols) { - _iter884.write(oprot); + _iter892.write(oprot); } 
} } @@ -1368,14 +1368,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list885.size); - FieldSchema _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list893.size); + FieldSchema _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = new FieldSchema(); - _elem886.read(iprot); - struct.cols.add(_elem886); + _elem894 = new FieldSchema(); + _elem894.read(iprot); + struct.cols.add(_elem894); } } struct.setColsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java index a7aba9fb0e..71957f79f2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java @@ -859,15 +859,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SerDeInfo struct) t case 3: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map104 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map104.size); - String _key105; - String _val106; - for (int _i107 = 0; _i107 < _map104.size; ++_i107) + org.apache.thrift.protocol.TMap _map112 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map112.size); + String _key113; + String _val114; + for (int _i115 = 0; _i115 < _map112.size; ++_i115) { - _key105 = iprot.readString(); - _val106 = iprot.readString(); - struct.parameters.put(_key105, _val106); + _key113 = iprot.readString(); + _val114 = iprot.readString(); + struct.parameters.put(_key113, _val114); } iprot.readMapEnd(); } @@ -935,10 +935,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SerDeInfo struct) oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter108 : struct.parameters.entrySet()) + for (Map.Entry _iter116 : struct.parameters.entrySet()) { - oprot.writeString(_iter108.getKey()); - oprot.writeString(_iter108.getValue()); + oprot.writeString(_iter116.getKey()); + oprot.writeString(_iter116.getValue()); } oprot.writeMapEnd(); } @@ -1021,10 +1021,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SerDeInfo struct) t if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter109 : struct.parameters.entrySet()) + for (Map.Entry _iter117 : struct.parameters.entrySet()) { - oprot.writeString(_iter109.getKey()); - oprot.writeString(_iter109.getValue()); + oprot.writeString(_iter117.getKey()); + oprot.writeString(_iter117.getValue()); } } } @@ -1056,15 +1056,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SerDeInfo struct) th } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map110 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map110.size); - String _key111; - String _val112; - for (int _i113 = 0; _i113 < _map110.size; ++_i113) + org.apache.thrift.protocol.TMap _map118 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map118.size); + String _key119; + String _val120; + for (int _i121 = 0; _i121 < _map118.size; ++_i121) { - _key111 = iprot.readString(); - _val112 = iprot.readString(); - struct.parameters.put(_key111, _val112); + _key119 = iprot.readString(); + _val120 = iprot.readString(); + struct.parameters.put(_key119, _val120); } } struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index dd95409393..a0ae84e760 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -435,14 +435,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list276 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list276.size); - ColumnStatistics _elem277; - for (int _i278 = 0; _i278 < _list276.size; ++_i278) + org.apache.thrift.protocol.TList _list284 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list284.size); + ColumnStatistics _elem285; + for (int _i286 = 0; _i286 < _list284.size; ++_i286) { - _elem277 = new ColumnStatistics(); - _elem277.read(iprot); - struct.colStats.add(_elem277); + _elem285 = new ColumnStatistics(); + _elem285.read(iprot); + struct.colStats.add(_elem285); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatistics _iter279 : struct.colStats) + for (ColumnStatistics _iter287 : struct.colStats) { - _iter279.write(oprot); + _iter287.write(oprot); } oprot.writeListEnd(); } @@ -508,9 +508,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatistics _iter280 : struct.colStats) + for (ColumnStatistics _iter288 : struct.colStats) { - _iter280.write(oprot); + _iter288.write(oprot); } } BitSet optionals = new BitSet(); @@ -527,14 +527,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colStats = new ArrayList(_list281.size); - ColumnStatistics _elem282; - for (int 
_i283 = 0; _i283 < _list281.size; ++_i283) + org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list289.size); + ColumnStatistics _elem290; + for (int _i291 = 0; _i291 < _list289.size; ++_i291) { - _elem282 = new ColumnStatistics(); - _elem282.read(iprot); - struct.colStats.add(_elem282); + _elem290 = new ColumnStatistics(); + _elem290.read(iprot); + struct.colStats.add(_elem290); } } struct.setColStatsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index dd1366ba6d..35d6d24c30 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list660 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list660.size); - ShowCompactResponseElement _elem661; - for (int _i662 = 0; _i662 < _list660.size; ++_i662) + org.apache.thrift.protocol.TList _list668 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list668.size); + ShowCompactResponseElement _elem669; + for (int _i670 = 0; _i670 < _list668.size; ++_i670) { - _elem661 = new ShowCompactResponseElement(); - _elem661.read(iprot); - struct.compacts.add(_elem661); + _elem669 = new ShowCompactResponseElement(); + _elem669.read(iprot); + struct.compacts.add(_elem669); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter663 : struct.compacts) + for (ShowCompactResponseElement _iter671 : struct.compacts) { - _iter663.write(oprot); + _iter671.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter664 : struct.compacts) + for (ShowCompactResponseElement _iter672 : struct.compacts) { - _iter664.write(oprot); + _iter672.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list665 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list665.size); - ShowCompactResponseElement _elem666; - for (int _i667 = 0; _i667 < _list665.size; ++_i667) + org.apache.thrift.protocol.TList _list673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list673.size); + ShowCompactResponseElement _elem674; + for (int _i675 = 0; 
_i675 < _list673.size; ++_i675) { - _elem666 = new ShowCompactResponseElement(); - _elem666.read(iprot); - struct.compacts.add(_elem666); + _elem674 = new ShowCompactResponseElement(); + _elem674.read(iprot); + struct.compacts.add(_elem674); } } struct.setCompactsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 941f756f80..c8fd20eb1c 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); - struct.locks = new ArrayList(_list626.size); - ShowLocksResponseElement _elem627; - for (int _i628 = 0; _i628 < _list626.size; ++_i628) + org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); + struct.locks = new ArrayList(_list634.size); + ShowLocksResponseElement _elem635; + for (int _i636 = 0; _i636 < _list634.size; ++_i636) { - _elem627 = new ShowLocksResponseElement(); - _elem627.read(iprot); - struct.locks.add(_elem627); + _elem635 = new ShowLocksResponseElement(); + _elem635.read(iprot); + struct.locks.add(_elem635); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter629 : struct.locks) + for (ShowLocksResponseElement _iter637 : struct.locks) { - _iter629.write(oprot); + _iter637.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter630 : struct.locks) + for (ShowLocksResponseElement _iter638 : struct.locks) { - _iter630.write(oprot); + _iter638.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list631.size); - ShowLocksResponseElement _elem632; - for (int _i633 = 0; _i633 < _list631.size; ++_i633) + org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list639.size); + ShowLocksResponseElement _elem640; + for (int _i641 = 0; _i641 < _list639.size; ++_i641) { - _elem632 = new ShowLocksResponseElement(); - _elem632.read(iprot); - struct.locks.add(_elem632); + _elem640 = new ShowLocksResponseElement(); + _elem640.read(iprot); + struct.locks.add(_elem640); } } struct.setLocksIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java index a391f94213..89fe49a2d2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java @@ -557,13 +557,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 1: // SKEWED_COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list114 = iprot.readListBegin(); - struct.skewedColNames = new ArrayList(_list114.size); - String _elem115; - for (int _i116 = 0; _i116 < _list114.size; ++_i116) + org.apache.thrift.protocol.TList _list122 = iprot.readListBegin(); + struct.skewedColNames = new ArrayList(_list122.size); + String _elem123; + for (int _i124 = 0; _i124 < _list122.size; ++_i124) { - _elem115 = iprot.readString(); - struct.skewedColNames.add(_elem115); + _elem123 = iprot.readString(); + struct.skewedColNames.add(_elem123); } iprot.readListEnd(); } @@ -575,23 +575,23 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 2: // SKEWED_COL_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list117 = iprot.readListBegin(); - struct.skewedColValues = new ArrayList>(_list117.size); - List _elem118; - for (int _i119 = 0; _i119 < _list117.size; ++_i119) + org.apache.thrift.protocol.TList _list125 = iprot.readListBegin(); + struct.skewedColValues = new ArrayList>(_list125.size); + List _elem126; + for (int _i127 = 0; _i127 < _list125.size; ++_i127) { { - org.apache.thrift.protocol.TList _list120 = iprot.readListBegin(); - _elem118 = new ArrayList(_list120.size); - String _elem121; - for (int _i122 = 0; _i122 < _list120.size; ++_i122) + org.apache.thrift.protocol.TList _list128 = iprot.readListBegin(); + _elem126 = new ArrayList(_list128.size); + String _elem129; + for (int _i130 = 0; _i130 < _list128.size; ++_i130) { - _elem121 = iprot.readString(); - _elem118.add(_elem121); + _elem129 = iprot.readString(); + _elem126.add(_elem129); } iprot.readListEnd(); } - struct.skewedColValues.add(_elem118); + struct.skewedColValues.add(_elem126); } iprot.readListEnd(); } @@ -603,25 +603,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 3: // SKEWED_COL_VALUE_LOCATION_MAPS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map123 = iprot.readMapBegin(); - struct.skewedColValueLocationMaps = new HashMap,String>(2*_map123.size); - List _key124; - String _val125; - for (int _i126 = 0; _i126 < _map123.size; ++_i126) + org.apache.thrift.protocol.TMap _map131 = iprot.readMapBegin(); + struct.skewedColValueLocationMaps = new HashMap,String>(2*_map131.size); + List _key132; + String _val133; + for (int _i134 = 0; _i134 < _map131.size; ++_i134) { { - org.apache.thrift.protocol.TList _list127 = iprot.readListBegin(); - _key124 = new ArrayList(_list127.size); - String _elem128; - for (int _i129 = 0; _i129 < _list127.size; ++_i129) + org.apache.thrift.protocol.TList _list135 = iprot.readListBegin(); + _key132 = new ArrayList(_list135.size); + String _elem136; + for (int _i137 = 0; _i137 < _list135.size; ++_i137) { - _elem128 = iprot.readString(); - _key124.add(_elem128); + _elem136 = iprot.readString(); + _key132.add(_elem136); } iprot.readListEnd(); } - _val125 = iprot.readString(); - 
struct.skewedColValueLocationMaps.put(_key124, _val125); + _val133 = iprot.readString(); + struct.skewedColValueLocationMaps.put(_key132, _val133); } iprot.readMapEnd(); } @@ -647,9 +647,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.skewedColNames.size())); - for (String _iter130 : struct.skewedColNames) + for (String _iter138 : struct.skewedColNames) { - oprot.writeString(_iter130); + oprot.writeString(_iter138); } oprot.writeListEnd(); } @@ -659,13 +659,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, struct.skewedColValues.size())); - for (List _iter131 : struct.skewedColValues) + for (List _iter139 : struct.skewedColValues) { { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter131.size())); - for (String _iter132 : _iter131) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter139.size())); + for (String _iter140 : _iter139) { - oprot.writeString(_iter132); + oprot.writeString(_iter140); } oprot.writeListEnd(); } @@ -678,17 +678,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_VALUE_LOCATION_MAPS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, struct.skewedColValueLocationMaps.size())); - for (Map.Entry, String> _iter133 : struct.skewedColValueLocationMaps.entrySet()) + for (Map.Entry, String> _iter141 : struct.skewedColValueLocationMaps.entrySet()) { { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter133.getKey().size())); - for (String _iter134 : _iter133.getKey()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter141.getKey().size())); + for (String _iter142 : _iter141.getKey()) { - oprot.writeString(_iter134); + oprot.writeString(_iter142); } oprot.writeListEnd(); } - oprot.writeString(_iter133.getValue()); + oprot.writeString(_iter141.getValue()); } oprot.writeMapEnd(); } @@ -725,22 +725,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) if (struct.isSetSkewedColNames()) { { oprot.writeI32(struct.skewedColNames.size()); - for (String _iter135 : struct.skewedColNames) + for (String _iter143 : struct.skewedColNames) { - oprot.writeString(_iter135); + oprot.writeString(_iter143); } } } if (struct.isSetSkewedColValues()) { { oprot.writeI32(struct.skewedColValues.size()); - for (List _iter136 : struct.skewedColValues) + for (List _iter144 : struct.skewedColValues) { { - oprot.writeI32(_iter136.size()); - for (String _iter137 : _iter136) + oprot.writeI32(_iter144.size()); + for (String _iter145 : _iter144) { - oprot.writeString(_iter137); + oprot.writeString(_iter145); } } } @@ -749,16 +749,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) if (struct.isSetSkewedColValueLocationMaps()) { { oprot.writeI32(struct.skewedColValueLocationMaps.size()); - for (Map.Entry, String> _iter138 : struct.skewedColValueLocationMaps.entrySet()) + for (Map.Entry, 
String> _iter146 : struct.skewedColValueLocationMaps.entrySet()) { { - oprot.writeI32(_iter138.getKey().size()); - for (String _iter139 : _iter138.getKey()) + oprot.writeI32(_iter146.getKey().size()); + for (String _iter147 : _iter146.getKey()) { - oprot.writeString(_iter139); + oprot.writeString(_iter147); } } - oprot.writeString(_iter138.getValue()); + oprot.writeString(_iter146.getValue()); } } } @@ -770,59 +770,59 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) t BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list140 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.skewedColNames = new ArrayList(_list140.size); - String _elem141; - for (int _i142 = 0; _i142 < _list140.size; ++_i142) + org.apache.thrift.protocol.TList _list148 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.skewedColNames = new ArrayList(_list148.size); + String _elem149; + for (int _i150 = 0; _i150 < _list148.size; ++_i150) { - _elem141 = iprot.readString(); - struct.skewedColNames.add(_elem141); + _elem149 = iprot.readString(); + struct.skewedColNames.add(_elem149); } } struct.setSkewedColNamesIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.skewedColValues = new ArrayList>(_list143.size); - List _elem144; - for (int _i145 = 0; _i145 < _list143.size; ++_i145) + org.apache.thrift.protocol.TList _list151 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.skewedColValues = new ArrayList>(_list151.size); + List _elem152; + for (int _i153 = 0; _i153 < _list151.size; ++_i153) { { - org.apache.thrift.protocol.TList _list146 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _elem144 = new ArrayList(_list146.size); - String _elem147; - for (int _i148 = 0; _i148 < _list146.size; ++_i148) + org.apache.thrift.protocol.TList _list154 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _elem152 = new ArrayList(_list154.size); + String _elem155; + for (int _i156 = 0; _i156 < _list154.size; ++_i156) { - _elem147 = iprot.readString(); - _elem144.add(_elem147); + _elem155 = iprot.readString(); + _elem152.add(_elem155); } } - struct.skewedColValues.add(_elem144); + struct.skewedColValues.add(_elem152); } } struct.setSkewedColValuesIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map149 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.skewedColValueLocationMaps = new HashMap,String>(2*_map149.size); - List _key150; - String _val151; - for (int _i152 = 0; _i152 < _map149.size; ++_i152) + org.apache.thrift.protocol.TMap _map157 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.skewedColValueLocationMaps = new HashMap,String>(2*_map157.size); + List _key158; + String _val159; + for (int _i160 = 0; _i160 < _map157.size; ++_i160) { { - org.apache.thrift.protocol.TList _list153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _key150 = new ArrayList(_list153.size); - String _elem154; - for (int 
_i155 = 0; _i155 < _list153.size; ++_i155) + org.apache.thrift.protocol.TList _list161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _key158 = new ArrayList(_list161.size); + String _elem162; + for (int _i163 = 0; _i163 < _list161.size; ++_i163) { - _elem154 = iprot.readString(); - _key150.add(_elem154); + _elem162 = iprot.readString(); + _key158.add(_elem162); } } - _val151 = iprot.readString(); - struct.skewedColValueLocationMaps.put(_key150, _val151); + _val159 = iprot.readString(); + struct.skewedColValueLocationMaps.put(_key158, _val159); } } struct.setSkewedColValueLocationMapsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java index 385a0159c8..00e60417ff 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java @@ -1290,14 +1290,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 1: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list156 = iprot.readListBegin(); - struct.cols = new ArrayList(_list156.size); - FieldSchema _elem157; - for (int _i158 = 0; _i158 < _list156.size; ++_i158) + org.apache.thrift.protocol.TList _list164 = iprot.readListBegin(); + struct.cols = new ArrayList(_list164.size); + FieldSchema _elem165; + for (int _i166 = 0; _i166 < _list164.size; ++_i166) { - _elem157 = new FieldSchema(); - _elem157.read(iprot); - struct.cols.add(_elem157); + _elem165 = new FieldSchema(); + _elem165.read(iprot); + struct.cols.add(_elem165); } iprot.readListEnd(); } @@ -1358,13 +1358,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 8: // BUCKET_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list159 = iprot.readListBegin(); - struct.bucketCols = new ArrayList(_list159.size); - String _elem160; - for (int _i161 = 0; _i161 < _list159.size; ++_i161) + org.apache.thrift.protocol.TList _list167 = iprot.readListBegin(); + struct.bucketCols = new ArrayList(_list167.size); + String _elem168; + for (int _i169 = 0; _i169 < _list167.size; ++_i169) { - _elem160 = iprot.readString(); - struct.bucketCols.add(_elem160); + _elem168 = iprot.readString(); + struct.bucketCols.add(_elem168); } iprot.readListEnd(); } @@ -1376,14 +1376,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 9: // SORT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list162 = iprot.readListBegin(); - struct.sortCols = new ArrayList(_list162.size); - Order _elem163; - for (int _i164 = 0; _i164 < _list162.size; ++_i164) + org.apache.thrift.protocol.TList _list170 = iprot.readListBegin(); + struct.sortCols = new ArrayList(_list170.size); + Order _elem171; + for (int _i172 = 0; _i172 < _list170.size; ++_i172) { - _elem163 = new Order(); - _elem163.read(iprot); - struct.sortCols.add(_elem163); + _elem171 = new Order(); + _elem171.read(iprot); + struct.sortCols.add(_elem171); } iprot.readListEnd(); } @@ -1395,15 +1395,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 10: // 
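The wholesale renaming in these hunks (_iter138 -> _iter146, _list140 -> _list148, and so on) is mechanical: the Thrift compiler numbers these temporaries sequentially through each generated file, and the structs this patch adds earlier in the IDL (presumably the new catalog request/response types) shift every later counter by exactly eight. Nothing about the wire format changes here. For reference, the tuple-scheme list read that all of these hunks implement reduces to the following pattern (names are illustrative, not from the patch):

  // Sketch of the tuple-scheme list read performed by the renumbered hunks:
  // the element count arrives as a bare i32 and there is no end-of-list marker.
  final class TupleListReadSketch {
    static java.util.List<String> readStringList(org.apache.thrift.protocol.TProtocol iprot)
        throws org.apache.thrift.TException {
      org.apache.thrift.protocol.TList header = new org.apache.thrift.protocol.TList(
          org.apache.thrift.protocol.TType.STRING, iprot.readI32());
      java.util.List<String> out = new java.util.ArrayList<>(header.size);
      for (int i = 0; i < header.size; ++i) {
        out.add(iprot.readString()); // one element per slot, no terminator
      }
      return out;
    }
  }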
PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map165 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map165.size); - String _key166; - String _val167; - for (int _i168 = 0; _i168 < _map165.size; ++_i168) + org.apache.thrift.protocol.TMap _map173 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map173.size); + String _key174; + String _val175; + for (int _i176 = 0; _i176 < _map173.size; ++_i176) { - _key166 = iprot.readString(); - _val167 = iprot.readString(); - struct.parameters.put(_key166, _val167); + _key174 = iprot.readString(); + _val175 = iprot.readString(); + struct.parameters.put(_key174, _val175); } iprot.readMapEnd(); } @@ -1446,9 +1446,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter169 : struct.cols) + for (FieldSchema _iter177 : struct.cols) { - _iter169.write(oprot); + _iter177.write(oprot); } oprot.writeListEnd(); } @@ -1484,9 +1484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(BUCKET_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.bucketCols.size())); - for (String _iter170 : struct.bucketCols) + for (String _iter178 : struct.bucketCols) { - oprot.writeString(_iter170); + oprot.writeString(_iter178); } oprot.writeListEnd(); } @@ -1496,9 +1496,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(SORT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.sortCols.size())); - for (Order _iter171 : struct.sortCols) + for (Order _iter179 : struct.sortCols) { - _iter171.write(oprot); + _iter179.write(oprot); } oprot.writeListEnd(); } @@ -1508,10 +1508,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter172 : struct.parameters.entrySet()) + for (Map.Entry _iter180 : struct.parameters.entrySet()) { - oprot.writeString(_iter172.getKey()); - oprot.writeString(_iter172.getValue()); + oprot.writeString(_iter180.getKey()); + oprot.writeString(_iter180.getValue()); } oprot.writeMapEnd(); } @@ -1587,9 +1587,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter173 : struct.cols) + for (FieldSchema _iter181 : struct.cols) { - _iter173.write(oprot); + _iter181.write(oprot); } } } @@ -1614,28 +1614,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetBucketCols()) { { oprot.writeI32(struct.bucketCols.size()); - for (String _iter174 : struct.bucketCols) + for (String _iter182 : struct.bucketCols) { - oprot.writeString(_iter174); + oprot.writeString(_iter182); } } } if (struct.isSetSortCols()) { { oprot.writeI32(struct.sortCols.size()); - for (Order _iter175 : struct.sortCols) + for (Order _iter183 : struct.sortCols) { - _iter175.write(oprot); + _iter183.write(oprot); } } } if 
(struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter176 : struct.parameters.entrySet()) + for (Map.Entry _iter184 : struct.parameters.entrySet()) { - oprot.writeString(_iter176.getKey()); - oprot.writeString(_iter176.getValue()); + oprot.writeString(_iter184.getKey()); + oprot.writeString(_iter184.getValue()); } } } @@ -1653,14 +1653,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor st BitSet incoming = iprot.readBitSet(12); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list177.size); - FieldSchema _elem178; - for (int _i179 = 0; _i179 < _list177.size; ++_i179) + org.apache.thrift.protocol.TList _list185 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list185.size); + FieldSchema _elem186; + for (int _i187 = 0; _i187 < _list185.size; ++_i187) { - _elem178 = new FieldSchema(); - _elem178.read(iprot); - struct.cols.add(_elem178); + _elem186 = new FieldSchema(); + _elem186.read(iprot); + struct.cols.add(_elem186); } } struct.setColsIsSet(true); @@ -1692,42 +1692,42 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor st } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list180 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.bucketCols = new ArrayList(_list180.size); - String _elem181; - for (int _i182 = 0; _i182 < _list180.size; ++_i182) + org.apache.thrift.protocol.TList _list188 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.bucketCols = new ArrayList(_list188.size); + String _elem189; + for (int _i190 = 0; _i190 < _list188.size; ++_i190) { - _elem181 = iprot.readString(); - struct.bucketCols.add(_elem181); + _elem189 = iprot.readString(); + struct.bucketCols.add(_elem189); } } struct.setBucketColsIsSet(true); } if (incoming.get(8)) { { - org.apache.thrift.protocol.TList _list183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.sortCols = new ArrayList(_list183.size); - Order _elem184; - for (int _i185 = 0; _i185 < _list183.size; ++_i185) + org.apache.thrift.protocol.TList _list191 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.sortCols = new ArrayList(_list191.size); + Order _elem192; + for (int _i193 = 0; _i193 < _list191.size; ++_i193) { - _elem184 = new Order(); - _elem184.read(iprot); - struct.sortCols.add(_elem184); + _elem192 = new Order(); + _elem192.read(iprot); + struct.sortCols.add(_elem192); } } struct.setSortColsIsSet(true); } if (incoming.get(9)) { { - org.apache.thrift.protocol.TMap _map186 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map186.size); - String _key187; - String _val188; - for (int _i189 = 0; _i189 < _map186.size; ++_i189) + org.apache.thrift.protocol.TMap _map194 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map194.size); + String _key195; + String _val196; + for (int _i197 = 0; _i197 < _map194.size; ++_i197) { - _key187 = 
iprot.readString(); - _val188 = iprot.readString(); - struct.parameters.put(_key187, _val188); + _key195 = iprot.readString(); + _val196 = iprot.readString(); + struct.parameters.put(_key195, _val196); } } struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index a132e5e838..8dfec980d9 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -54,6 +54,7 @@ private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15); private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -77,6 +78,7 @@ private boolean temporary; // optional private boolean rewriteEnabled; // optional private CreationMetadata creationMetadata; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
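One behavioral detail worth flagging in the StorageDescriptor hunk just above: after the renumbered map read, the generated code still routes struct.parameters through org.apache.hadoop.hive.metastore.utils.StringUtils.intern(...). The helper's body is not part of this diff; the sketch below shows the kind of deduplication such an intern step performs, as an assumption about the implementation rather than a copy of it:

  final class InternSketch {
    // Hedged sketch of a map-interning helper. Identical storage parameters
    // repeated across many partitions collapse to shared String instances in
    // the JVM string pool, trading a little deserialization CPU for heap.
    static java.util.Map<String, String> intern(java.util.Map<String, String> m) {
      if (m == null) {
        return null;
      }
      java.util.Map<String, String> out = new java.util.HashMap<>(m.size());
      for (java.util.Map.Entry<String, String> e : m.entrySet()) {
        out.put(e.getKey().intern(),
            e.getValue() == null ? null : e.getValue().intern());
      }
      return out;
    }
  }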
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -95,7 +97,8 @@ PRIVILEGES((short)13, "privileges"), TEMPORARY((short)14, "temporary"), REWRITE_ENABLED((short)15, "rewriteEnabled"), - CREATION_METADATA((short)16, "creationMetadata"); + CREATION_METADATA((short)16, "creationMetadata"), + CAT_NAME((short)17, "catName"); private static final Map byName = new HashMap(); @@ -142,6 +145,8 @@ public static _Fields findByThriftId(int fieldId) { return REWRITE_ENABLED; case 16: // CREATION_METADATA return CREATION_METADATA; + case 17: // CAT_NAME + return CAT_NAME; default: return null; } @@ -188,7 +193,7 @@ public String getFieldName() { private static final int __TEMPORARY_ISSET_ID = 3; private static final int __REWRITEENABLED_ISSET_ID = 4; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -227,6 +232,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creationMetadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "CreationMetadata"))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -316,6 +323,9 @@ public Table(Table other) { if (other.isSetCreationMetadata()) { this.creationMetadata = other.creationMetadata; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public Table deepCopy() { @@ -345,6 +355,7 @@ public void clear() { setRewriteEnabledIsSet(false); this.rewriteEnabled = false; this.creationMetadata = null; + this.catName = null; } public String getTableName() { @@ -736,6 +747,29 @@ public void setCreationMetadataIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -866,6 +900,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -919,6 +961,9 @@ public Object getFieldValue(_Fields field) { case CREATION_METADATA: return getCreationMetadata(); + case CAT_NAME: + return getCatName(); + } throw new 
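The accessor block above is the standard quartet the compiler emits for an optional field (get/set, unset, isSet, setIsSet). Because catName is optional, older writers simply never set it, so readers that need a concrete catalog should test isSetCatName() and fall back to the default catalog name, the Warehouse.DEFAULT_CATALOG_NAME constant this patch routes through elsewhere. A hedged usage sketch:

  final class CatNameSketch {
    static String catalogOf(org.apache.hadoop.hive.metastore.api.Table t) {
      // Optional field: unset means "the default catalog". The constant
      // lives on org.apache.hadoop.hive.metastore.Warehouse in this patch.
      return t.isSetCatName()
          ? t.getCatName()
          : org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
    }
  }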
IllegalStateException(); } @@ -962,6 +1007,8 @@ public boolean isSet(_Fields field) { return isSetRewriteEnabled(); case CREATION_METADATA: return isSetCreationMetadata(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -1123,6 +1170,15 @@ public boolean equals(Table that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1210,6 +1266,11 @@ public int hashCode() { if (present_creationMetadata) list.add(creationMetadata); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1381,6 +1442,16 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1516,6 +1587,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1627,14 +1708,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw case 8: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list190 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list190.size); - FieldSchema _elem191; - for (int _i192 = 0; _i192 < _list190.size; ++_i192) + org.apache.thrift.protocol.TList _list198 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list198.size); + FieldSchema _elem199; + for (int _i200 = 0; _i200 < _list198.size; ++_i200) { - _elem191 = new FieldSchema(); - _elem191.read(iprot); - struct.partitionKeys.add(_elem191); + _elem199 = new FieldSchema(); + _elem199.read(iprot); + struct.partitionKeys.add(_elem199); } iprot.readListEnd(); } @@ -1646,15 +1727,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw case 9: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map193 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map193.size); - String _key194; - String _val195; - for (int _i196 = 0; _i196 < _map193.size; ++_i196) + org.apache.thrift.protocol.TMap _map201 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map201.size); + String _key202; + String _val203; + for (int _i204 = 0; _i204 < _map201.size; ++_i204) { - _key194 = iprot.readString(); - _val195 = iprot.readString(); - struct.parameters.put(_key194, _val195); + _key202 = iprot.readString(); + _val203 = iprot.readString(); + struct.parameters.put(_key202, _val203); } iprot.readMapEnd(); } @@ -1721,6 +1802,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 17: // CAT_NAME + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1767,9 +1856,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter197 : struct.partitionKeys) + for (FieldSchema _iter205 : struct.partitionKeys) { - _iter197.write(oprot); + _iter205.write(oprot); } oprot.writeListEnd(); } @@ -1779,10 +1868,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter198 : struct.parameters.entrySet()) + for (Map.Entry _iter206 : struct.parameters.entrySet()) { - oprot.writeString(_iter198.getKey()); - oprot.writeString(_iter198.getValue()); + oprot.writeString(_iter206.getKey()); + oprot.writeString(_iter206.getValue()); } oprot.writeMapEnd(); } @@ -1827,6 +1916,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1893,7 +1989,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetCreationMetadata()) { optionals.set(15); } - oprot.writeBitSet(optionals, 16); + if (struct.isSetCatName()) { + optionals.set(16); + } + oprot.writeBitSet(optionals, 17); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1918,19 +2017,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetPartitionKeys()) { { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter199 : struct.partitionKeys) + for (FieldSchema _iter207 : struct.partitionKeys) { - _iter199.write(oprot); + _iter207.write(oprot); } } } if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter200 : struct.parameters.entrySet()) + for (Map.Entry _iter208 : struct.parameters.entrySet()) { - oprot.writeString(_iter200.getKey()); - oprot.writeString(_iter200.getValue()); + oprot.writeString(_iter208.getKey()); + oprot.writeString(_iter208.getValue()); } } } @@ -1955,12 +2054,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetCreationMetadata()) { struct.creationMetadata.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(16); + BitSet incoming = iprot.readBitSet(17); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1992,29 +2094,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws } if (incoming.get(7)) { 
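Two scheme-specific encodings of the new field appear above. In the field-id scheme, catName is just field 17, and unknown fields fall through to TProtocolUtil.skip, so an old reader tolerates a new writer. In the tuple scheme, presence of optional fields travels as a positional bitset, which is why the writer grows from writeBitSet(optionals, 16) to 17 and the reader from readBitSet(16) to 17; both ends must be regenerated together. The agreement, in sketch form (this mirrors the generated TupleScheme methods above, it is not new logic):

  // Writer and reader must agree on bitset width and bit order.
  java.util.BitSet optionals = new java.util.BitSet();
  if (struct.isSetCreationMetadata()) { optionals.set(15); }
  if (struct.isSetCatName())          { optionals.set(16); } // bit added by this patch
  oprot.writeBitSet(optionals, 17);   // was 16

  java.util.BitSet incoming = iprot.readBitSet(17);  // was 16
  if (incoming.get(16)) {
    struct.setCatName(iprot.readString());
  }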
{ - org.apache.thrift.protocol.TList _list201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list201.size); - FieldSchema _elem202; - for (int _i203 = 0; _i203 < _list201.size; ++_i203) + org.apache.thrift.protocol.TList _list209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list209.size); + FieldSchema _elem210; + for (int _i211 = 0; _i211 < _list209.size; ++_i211) { - _elem202 = new FieldSchema(); - _elem202.read(iprot); - struct.partitionKeys.add(_elem202); + _elem210 = new FieldSchema(); + _elem210.read(iprot); + struct.partitionKeys.add(_elem210); } } struct.setPartitionKeysIsSet(true); } if (incoming.get(8)) { { - org.apache.thrift.protocol.TMap _map204 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map204.size); - String _key205; - String _val206; - for (int _i207 = 0; _i207 < _map204.size; ++_i207) + org.apache.thrift.protocol.TMap _map212 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map212.size); + String _key213; + String _val214; + for (int _i215 = 0; _i215 < _map212.size; ++_i215) { - _key205 = iprot.readString(); - _val206 = iprot.readString(); - struct.parameters.put(_key205, _val206); + _key213 = iprot.readString(); + _val214 = iprot.readString(); + struct.parameters.put(_key213, _val214); } } struct.setParametersIsSet(true); @@ -2049,6 +2151,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.creationMetadata.read(iprot); struct.setCreationMetadataIsSet(true); } + if (incoming.get(16)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java index cadbaaab1c..9e20f6f2c6 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField COMMENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("comments", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String tableName; // required private String tableType; // required private String comments; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TABLE_NAME((short)2, "tableName"), TABLE_TYPE((short)3, "tableType"), - COMMENTS((short)4, "comments"); + COMMENTS((short)4, "comments"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_TYPE; case 4: // COMMENTS return COMMENTS; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,7 +127,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.COMMENTS}; + private static final _Fields optionals[] = {_Fields.COMMENTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -134,6 +139,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COMMENTS, new org.apache.thrift.meta_data.FieldMetaData("comments", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableMeta.class, metaDataMap); } @@ -168,6 +175,9 @@ public TableMeta(TableMeta other) { if (other.isSetComments()) { this.comments = other.comments; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public TableMeta deepCopy() { @@ -180,6 +190,7 @@ public void clear() { this.tableName = null; this.tableType = null; this.comments = null; + this.catName = null; } public String getDbName() { @@ -274,6 +285,29 @@ public void setCommentsIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -308,6 +342,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -325,6 +367,9 @@ public Object getFieldValue(_Fields field) { case COMMENTS: return getComments(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -344,6 +389,8 @@ public boolean isSet(_Fields field) { return isSetTableType(); case COMMENTS: return isSetComments(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -397,6 +444,15 @@ public boolean equals(TableMeta that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName 
|| that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -424,6 +480,11 @@ public int hashCode() { if (present_comments) list.add(comments); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -475,6 +536,16 @@ public int compareTo(TableMeta other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -528,6 +599,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -615,6 +696,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableMeta struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -650,6 +739,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableMeta struct) oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -674,10 +770,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) t if (struct.isSetComments()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetComments()) { oprot.writeString(struct.comments); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -689,11 +791,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) th struct.setTableNameIsSet(true); struct.tableType = iprot.readString(); struct.setTableTypeIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.comments = iprot.readString(); struct.setCommentsIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index 6c7c9af797..a663a64c67 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new 
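TableMeta picks up the same optional catName as field 5, with identical equals/hashCode/compareTo/toString plumbing. A hedged construction sketch; the three-argument constructor is assumed from dbName, tableName and tableType being the required fields:

  org.apache.hadoop.hive.metastore.api.TableMeta meta =
      new org.apache.hadoop.hive.metastore.api.TableMeta("db1", "t1", "MANAGED_TABLE");
  meta.setCatName("hive");        // optional; unset still compares and prints cleanly
  assert meta.isSetCatName();
  // toString() now appends ", catName:hive" whenever the field is set.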
org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private String tblName; // required private List colNames; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), - COL_NAMES((short)3, "colNames"); + COL_NAMES((short)3, "colNames"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // COL_NAMES return COL_NAMES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,6 +122,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127,6 +133,8 @@ public String getFieldName() { tmpMap.put(_Fields.COL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("colNames", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap); } @@ -159,6 +167,9 @@ public TableStatsRequest(TableStatsRequest other) { List __this__colNames = new ArrayList(other.colNames); this.colNames = __this__colNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public TableStatsRequest deepCopy() { @@ -170,6 +181,7 @@ public void clear() { this.dbName = null; this.tblName = null; this.colNames = null; + this.catName = null; } public String getDbName() { @@ -256,6 +268,29 @@ public void setColNamesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; 
+ } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -282,6 +317,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -296,6 +339,9 @@ public Object getFieldValue(_Fields field) { case COL_NAMES: return getColNames(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -313,6 +359,8 @@ public boolean isSet(_Fields field) { return isSetTblName(); case COL_NAMES: return isSetColNames(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -357,6 +405,15 @@ public boolean equals(TableStatsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -379,6 +436,11 @@ public int hashCode() { if (present_colNames) list.add(colNames); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -420,6 +482,16 @@ public int compareTo(TableStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -463,6 +535,16 @@ public String toString() { sb.append(this.colNames); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -537,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list442 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list442.size); - String _elem443; - for (int _i444 = 0; _i444 < _list442.size; ++_i444) + org.apache.thrift.protocol.TList _list450 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list450.size); + String _elem451; + for (int _i452 = 0; _i452 < _list450.size; ++_i452) { - _elem443 = iprot.readString(); - struct.colNames.add(_elem443); + _elem451 = iprot.readString(); + struct.colNames.add(_elem451); } iprot.readListEnd(); } @@ -552,6 +634,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -579,14 +669,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter445 : struct.colNames) + for (String _iter453 : struct.colNames) { - oprot.writeString(_iter445); + oprot.writeString(_iter453); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -608,11 +705,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter446 : struct.colNames) + for (String _iter454 : struct.colNames) { - oprot.writeString(_iter446); + oprot.writeString(_iter454); } } + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -623,16 +728,21 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list447.size); - String _elem448; - for (int _i449 = 0; _i449 < _list447.size; ++_i449) + org.apache.thrift.protocol.TList _list455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list455.size); + String _elem456; + for (int _i457 = 0; _i457 < _list455.size; ++_i457) { - _elem448 = iprot.readString(); - struct.colNames.add(_elem448); + _elem456 = iprot.readString(); + struct.colNames.add(_elem456); } } struct.setColNamesIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index 789f91c372..dff7d5c204 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list416 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list416.size); - ColumnStatisticsObj _elem417; - for (int _i418 = 0; _i418 < _list416.size; ++_i418) + org.apache.thrift.protocol.TList _list424 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list424.size); + ColumnStatisticsObj _elem425; + for (int _i426 = 0; _i426 < _list424.size; ++_i426) { - _elem417 = new ColumnStatisticsObj(); - _elem417.read(iprot); - struct.tableStats.add(_elem417); + _elem425 = new ColumnStatisticsObj(); + _elem425.read(iprot); + struct.tableStats.add(_elem425); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { 
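For TableStatsRequest the tuple scheme writes the three required fields unconditionally and only then appends a one-bit optionals BitSet for catName, mirrored by the trailing readBitSet(1) on the read side; as with Table, reader and writer must move in lockstep. A hedged usage sketch; get_table_statistics_req is the pre-existing metastore RPC that consumes this request:

  org.apache.hadoop.hive.metastore.api.TableStatsRequest req =
      new org.apache.hadoop.hive.metastore.api.TableStatsRequest(
          "db1", "t1", java.util.Arrays.asList("col_a", "col_b"));
  req.setCatName("hive");  // new optional; omit it to target the default catalog
  // TableStatsResult res = client.get_table_statistics_req(req);
  //   (client: a ThriftHiveMetastore.Iface)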
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size())); - for (ColumnStatisticsObj _iter419 : struct.tableStats) + for (ColumnStatisticsObj _iter427 : struct.tableStats) { - _iter419.write(oprot); + _iter427.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter420 : struct.tableStats) + for (ColumnStatisticsObj _iter428 : struct.tableStats) { - _iter420.write(oprot); + _iter428.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list421.size); - ColumnStatisticsObj _elem422; - for (int _i423 = 0; _i423 < _list421.size; ++_i423) + org.apache.thrift.protocol.TList _list429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list429.size); + ColumnStatisticsObj _elem430; + for (int _i431 = 0; _i431 < _list429.size; ++_i431) { - _elem422 = new ColumnStatisticsObj(); - _elem422.read(iprot); - struct.tableStats.add(_elem422); + _elem430 = new ColumnStatisticsObj(); + _elem430.read(iprot); + struct.tableStats.add(_elem430); } } struct.setTableStatsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java index 893454e700..e0defbdeba 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableValidWriteIds.java @@ -708,13 +708,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableValidWriteIds case 3: // INVALID_WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list586 = iprot.readListBegin(); - struct.invalidWriteIds = new ArrayList(_list586.size); - long _elem587; - for (int _i588 = 0; _i588 < _list586.size; ++_i588) + org.apache.thrift.protocol.TList _list594 = iprot.readListBegin(); + struct.invalidWriteIds = new ArrayList(_list594.size); + long _elem595; + for (int _i596 = 0; _i596 < _list594.size; ++_i596) { - _elem587 = iprot.readI64(); - struct.invalidWriteIds.add(_elem587); + _elem595 = iprot.readI64(); + struct.invalidWriteIds.add(_elem595); } iprot.readListEnd(); } @@ -764,9 +764,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableValidWriteIds oprot.writeFieldBegin(INVALID_WRITE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.invalidWriteIds.size())); - for (long _iter589 : struct.invalidWriteIds) + for (long _iter597 : struct.invalidWriteIds) { - oprot.writeI64(_iter589); + oprot.writeI64(_iter597); } oprot.writeListEnd(); } @@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
TableValidWriteIds oprot.writeI64(struct.writeIdHighWaterMark); { oprot.writeI32(struct.invalidWriteIds.size()); - for (long _iter590 : struct.invalidWriteIds) + for (long _iter598 : struct.invalidWriteIds) { - oprot.writeI64(_iter590); + oprot.writeI64(_iter598); } } oprot.writeBinary(struct.abortedBits); @@ -827,13 +827,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableValidWriteIds s struct.writeIdHighWaterMark = iprot.readI64(); struct.setWriteIdHighWaterMarkIsSet(true); { - org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.invalidWriteIds = new ArrayList(_list591.size); - long _elem592; - for (int _i593 = 0; _i593 < _list591.size; ++_i593) + org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.invalidWriteIds = new ArrayList(_list599.size); + long _elem600; + for (int _i601 = 0; _i601 < _list599.size; ++_i601) { - _elem592 = iprot.readI64(); - struct.invalidWriteIds.add(_elem592); + _elem600 = iprot.readI64(); + struct.invalidWriteIds.add(_elem600); } } struct.setInvalidWriteIdsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index e824f4a145..751eec4e85 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -46,6 +46,14 @@ public void setMetaConf(String key, String value) throws MetaException, org.apache.thrift.TException; + public void create_catalog(CreateCatalogRequest catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException; + + public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + + public GetCatalogsResponse get_catalogs() throws MetaException, org.apache.thrift.TException; + + public void drop_catalog(DropCatalogRequest catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException; + public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException; public Database get_database(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; @@ -120,7 +128,7 @@ public Map get_materialization_invalidation_info(String dbname, List tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; - public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; + public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; public List get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException; @@ -442,6 +450,14 @@ public void setMetaConf(String key, String value, 
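Interface-level summary of the hunk above: four new RPCs (create_catalog, get_catalog, get_catalogs, drop_catalog) are added to both the synchronous Iface and the callback-based AsyncIface, and update_creation_metadata grows a leading catName parameter, a source-incompatible change for existing implementors and callers, who must now name a catalog explicitly. A hedged synchronous-client sketch; the request objects' fields are defined elsewhere in this patch, so they are left at their defaults here:

  static void catalogRoundTrip(
      org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface client)
      throws org.apache.thrift.TException {
    // List, fetch, then drop a catalog (all types from
    // org.apache.hadoop.hive.metastore.api).
    GetCatalogsResponse all = client.get_catalogs();
    GetCatalogResponse one = client.get_catalog(new GetCatalogRequest());
    client.drop_catalog(new DropCatalogRequest());
    // update_creation_metadata now takes the catalog first:
    client.update_creation_metadata("hive", "db1", "mv1", new CreationMetadata());
  }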
org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_catalog(CreateCatalogRequest catalog, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_catalog(GetCatalogRequest catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_catalogs(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void drop_catalog(DropCatalogRequest catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_database(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -516,7 +532,7 @@ public void get_materialization_invalidation_info(String dbname, List tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -902,6 +918,118 @@ public void recv_setMetaConf() throws MetaException, org.apache.thrift.TExceptio return; } + public void create_catalog(CreateCatalogRequest catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_catalog(catalog); + recv_create_catalog(); + } + + public void send_create_catalog(CreateCatalogRequest catalog) throws org.apache.thrift.TException + { + create_catalog_args args = new create_catalog_args(); + args.setCatalog(catalog); + sendBase("create_catalog", args); + } + + public void recv_create_catalog() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_catalog_result result = new create_catalog_result(); + receiveBase(result, "create_catalog"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + + public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + send_get_catalog(catName); + return recv_get_catalog(); + } + + public void send_get_catalog(GetCatalogRequest catName) throws org.apache.thrift.TException + { + get_catalog_args args = new get_catalog_args(); + args.setCatName(catName); + sendBase("get_catalog", args); + } + + public GetCatalogResponse recv_get_catalog() throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + get_catalog_result result = new get_catalog_result(); + receiveBase(result, "get_catalog"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result"); + } + + public GetCatalogsResponse get_catalogs() throws MetaException, org.apache.thrift.TException + { + send_get_catalogs(); + return recv_get_catalogs(); + } + + public void send_get_catalogs() throws org.apache.thrift.TException + { + get_catalogs_args args = new get_catalogs_args(); + sendBase("get_catalogs", args); + } + + public GetCatalogsResponse recv_get_catalogs() throws MetaException, org.apache.thrift.TException + { + get_catalogs_result result = new get_catalogs_result(); + receiveBase(result, "get_catalogs"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result"); + } + + public void drop_catalog(DropCatalogRequest catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_drop_catalog(catName); + recv_drop_catalog(); + } + + public void send_drop_catalog(DropCatalogRequest catName) throws org.apache.thrift.TException + { + drop_catalog_args args = new drop_catalog_args(); + args.setCatName(catName); + sendBase("drop_catalog", args); + } + + public void recv_drop_catalog() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + drop_catalog_result result = new drop_catalog_result(); + receiveBase(result, "drop_catalog"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException { send_create_database(database); @@ -1969,15 +2097,16 @@ public void send_get_materialization_invalidation_info(String dbname, List Map> getProcessMap(Map> processMap) { processMap.put("getMetaConf", new getMetaConf()); processMap.put("setMetaConf", new setMetaConf()); + processMap.put("create_catalog", new create_catalog()); + processMap.put("get_catalog", new get_catalog()); + processMap.put("get_catalogs", new get_catalogs()); + processMap.put("drop_catalog", new drop_catalog()); processMap.put("create_database", new create_database()); processMap.put("get_database", new get_database()); processMap.put("drop_database", new drop_database()); @@ -13529,6 +13790,112 @@ public setMetaConf_result getResult(I iface, setMetaConf_args args) throws org.a } } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog extends org.apache.thrift.ProcessFunction { + public create_catalog() { + super("create_catalog"); + } + + public create_catalog_args getEmptyArgsInstance() { + return new create_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public create_catalog_result getResult(I iface, create_catalog_args args) throws org.apache.thrift.TException { + create_catalog_result result = new create_catalog_result(); + try { + iface.create_catalog(args.catalog); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + 
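The recv_* bodies above show the generated error contract: each RPC has a result struct with one slot per declared exception (o1, o2, o3), recv_* rethrows whichever slot is set, and for non-void calls a result with nothing set surfaces as TApplicationException(MISSING_RESULT). From the caller's side that reduces to ordinary catch clauses:

  try {
    client.drop_catalog(new DropCatalogRequest()); // fields set elsewhere
  } catch (NoSuchObjectException e) {       // wire slot o1
    // catalog name did not resolve
  } catch (InvalidOperationException e) {   // wire slot o2
    // e.g. catalog not empty (an assumption about the server-side rule)
  } catch (MetaException e) {               // wire slot o3
    // generic metastore failure
  } catch (org.apache.thrift.TException e) {
    // transport-level or MISSING_RESULT failures
  }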
@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog extends org.apache.thrift.ProcessFunction { + public get_catalog() { + super("get_catalog"); + } + + public get_catalog_args getEmptyArgsInstance() { + return new get_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_catalog_result getResult(I iface, get_catalog_args args) throws org.apache.thrift.TException { + get_catalog_result result = new get_catalog_result(); + try { + result.success = iface.get_catalog(args.catName); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs extends org.apache.thrift.ProcessFunction { + public get_catalogs() { + super("get_catalogs"); + } + + public get_catalogs_args getEmptyArgsInstance() { + return new get_catalogs_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_catalogs_result getResult(I iface, get_catalogs_args args) throws org.apache.thrift.TException { + get_catalogs_result result = new get_catalogs_result(); + try { + result.success = iface.get_catalogs(); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog extends org.apache.thrift.ProcessFunction { + public drop_catalog() { + super("drop_catalog"); + } + + public drop_catalog_args getEmptyArgsInstance() { + return new drop_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public drop_catalog_result getResult(I iface, drop_catalog_args args) throws org.apache.thrift.TException { + drop_catalog_result result = new drop_catalog_result(); + try { + iface.drop_catalog(args.catName); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.ProcessFunction { public create_database() { super("create_database"); @@ -14515,7 +14882,7 @@ protected boolean isOneway() { public update_creation_metadata_result getResult(I iface, update_creation_metadata_args args) throws org.apache.thrift.TException { update_creation_metadata_result result = new update_creation_metadata_result(); try { - iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + iface.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata); } catch (MetaException o1) { result.o1 = o1; } catch (InvalidOperationException o2) { @@ -18539,6 +18906,10 @@ protected AsyncProcessor(I iface, Map Map> getProcessMap(Map> processMap) { processMap.put("getMetaConf", new getMetaConf()); processMap.put("setMetaConf", new setMetaConf()); + processMap.put("create_catalog", new create_catalog()); + processMap.put("get_catalog", new get_catalog()); + processMap.put("get_catalogs", new get_catalogs()); + processMap.put("drop_catalog", new drop_catalog()); processMap.put("create_database", new create_database()); 
processMap.put("get_database", new get_database()); processMap.put("drop_database", new drop_database()); @@ -18849,20 +19220,20 @@ public void start(I iface, setMetaConf_args args, org.apache.thrift.async.AsyncM } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.AsyncProcessFunction { - public create_database() { - super("create_database"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog extends org.apache.thrift.AsyncProcessFunction { + public create_catalog() { + super("create_catalog"); } - public create_database_args getEmptyArgsInstance() { - return new create_database_args(); + public create_catalog_args getEmptyArgsInstance() { + return new create_catalog_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Void o) { - create_database_result result = new create_database_result(); + create_catalog_result result = new create_catalog_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -18874,7 +19245,7 @@ public void onComplete(Void o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - create_database_result result = new create_database_result(); + create_catalog_result result = new create_catalog_result(); if (e instanceof AlreadyExistsException) { result.o1 = (AlreadyExistsException) e; result.setO1IsSet(true); @@ -18910,25 +19281,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, create_database_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_database(args.database,resultHandler); + public void start(I iface, create_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_catalog(args.catalog,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_database extends org.apache.thrift.AsyncProcessFunction { - public get_database() { - super("get_database"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog extends org.apache.thrift.AsyncProcessFunction { + public get_catalog() { + super("get_catalog"); } - public get_database_args getEmptyArgsInstance() { - return new get_database_args(); + public get_catalog_args getEmptyArgsInstance() { + return new get_catalog_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Database o) { - get_database_result result = new get_database_result(); + return new AsyncMethodCallback() { + public void onComplete(GetCatalogResponse o) { + get_catalog_result result = new get_catalog_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ 
-18941,7 +19312,258 @@ public void onComplete(Database o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_database_result result = new get_database_result(); + get_catalog_result result = new get_catalog_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_catalog(args.catName,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs extends org.apache.thrift.AsyncProcessFunction { + public get_catalogs() { + super("get_catalogs"); + } + + public get_catalogs_args getEmptyArgsInstance() { + return new get_catalogs_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetCatalogsResponse o) { + get_catalogs_result result = new get_catalogs_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_catalogs_result result = new get_catalogs_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_catalogs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_catalogs(resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog extends org.apache.thrift.AsyncProcessFunction { + public drop_catalog() { + super("drop_catalog"); + } + + public drop_catalog_args getEmptyArgsInstance() { + return new drop_catalog_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction 
fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + drop_catalog_result result = new drop_catalog_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_catalog_result result = new drop_catalog_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidOperationException) { + result.o2 = (InvalidOperationException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, drop_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.drop_catalog(args.catName,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.AsyncProcessFunction { + public create_database() { + super("create_database"); + } + + public create_database_args getEmptyArgsInstance() { + return new create_database_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + create_database_result result = new create_database_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_database_result result = new create_database_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public 
void start(I iface, create_database_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_database(args.database,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_database extends org.apache.thrift.AsyncProcessFunction { + public get_database() { + super("get_database"); + } + + public get_database_args getEmptyArgsInstance() { + return new get_database_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Database o) { + get_database_result result = new get_database_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_database_result result = new get_database_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -21210,7 +21832,7 @@ protected boolean isOneway() { } public void start(I iface, update_creation_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata,resultHandler); + iface.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata,resultHandler); } } @@ -31439,7 +32061,3412 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("getMetaConf_result("); + StringBuilder sb = new StringBuilder("getMetaConf_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getMetaConf_resultStandardSchemeFactory implements SchemeFactory { + public getMetaConf_resultStandardScheme getScheme() { + return new getMetaConf_resultStandardScheme(); + } + } + + private static class getMetaConf_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getMetaConf_resultTupleSchemeFactory implements SchemeFactory { + public getMetaConf_resultTupleScheme getScheme() { + return new getMetaConf_resultTupleScheme(); + } + } + + private static class getMetaConf_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws 
org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeString(struct.success); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_args"); + + private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new setMetaConf_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new setMetaConf_argsTupleSchemeFactory()); + } + + private String key; // required + private String value; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + KEY((short)1, "key"), + VALUE((short)2, "value"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // KEY + return KEY; + case 2: // VALUE + return VALUE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_args.class, metaDataMap); + } + + public setMetaConf_args() { + } + + public setMetaConf_args( + String key, + String value) + { + this(); + this.key = key; + this.value = value; + } + + /** + * Performs a deep copy on other. + */ + public setMetaConf_args(setMetaConf_args other) { + if (other.isSetKey()) { + this.key = other.key; + } + if (other.isSetValue()) { + this.value = other.value; + } + } + + public setMetaConf_args deepCopy() { + return new setMetaConf_args(this); + } + + @Override + public void clear() { + this.key = null; + this.value = null; + } + + public String getKey() { + return this.key; + } + + public void setKey(String key) { + this.key = key; + } + + public void unsetKey() { + this.key = null; + } + + /** Returns true if field key is set (has been assigned a value) and false otherwise */ + public boolean isSetKey() { + return this.key != null; + } + + public void setKeyIsSet(boolean value) { + if (!value) { + this.key = null; + } + } + + public String getValue() { + return this.value; + } + + public void setValue(String value) { + this.value = value; + } + + public void unsetValue() { + this.value = null; + } + + /** Returns true if field value is set (has been assigned a value) and false otherwise */ + public boolean isSetValue() { + return this.value != null; + } + + public void setValueIsSet(boolean value) { + if (!value) { + this.value = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case KEY: + if (value == null) { + unsetKey(); + } else { + setKey((String)value); + } + break; + + case VALUE: + if (value == null) { + unsetValue(); + } else { + setValue((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case KEY: + return getKey(); + + case VALUE: + return getValue(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case KEY: + return isSetKey(); + case VALUE: + return isSetValue(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + 
return false; + if (that instanceof setMetaConf_args) + return this.equals((setMetaConf_args)that); + return false; + } + + public boolean equals(setMetaConf_args that) { + if (that == null) + return false; + + boolean this_present_key = true && this.isSetKey(); + boolean that_present_key = true && that.isSetKey(); + if (this_present_key || that_present_key) { + if (!(this_present_key && that_present_key)) + return false; + if (!this.key.equals(that.key)) + return false; + } + + boolean this_present_value = true && this.isSetValue(); + boolean that_present_value = true && that.isSetValue(); + if (this_present_value || that_present_value) { + if (!(this_present_value && that_present_value)) + return false; + if (!this.value.equals(that.value)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_key = true && (isSetKey()); + list.add(present_key); + if (present_key) + list.add(key); + + boolean present_value = true && (isSetValue()); + list.add(present_value); + if (present_value) + list.add(value); + + return list.hashCode(); + } + + @Override + public int compareTo(setMetaConf_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("setMetaConf_args("); + boolean first = true; + + sb.append("key:"); + if (this.key == null) { + sb.append("null"); + } else { + sb.append(this.key); + } + first = false; + if (!first) sb.append(", "); + sb.append("value:"); + if (this.value == null) { + sb.append("null"); + } else { + sb.append(this.value); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setMetaConf_argsStandardSchemeFactory implements SchemeFactory { + public setMetaConf_argsStandardScheme getScheme() { + return new setMetaConf_argsStandardScheme(); + } + } + + private static class setMetaConf_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.value = iprot.readString(); + struct.setValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.key != null) { + oprot.writeFieldBegin(KEY_FIELD_DESC); + oprot.writeString(struct.key); + oprot.writeFieldEnd(); + } + if (struct.value != null) { + oprot.writeFieldBegin(VALUE_FIELD_DESC); + oprot.writeString(struct.value); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setMetaConf_argsTupleSchemeFactory implements SchemeFactory { + public setMetaConf_argsTupleScheme getScheme() { + return new setMetaConf_argsTupleScheme(); + } + } + + private static class setMetaConf_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetKey()) { + optionals.set(0); + } + if (struct.isSetValue()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetKey()) { + oprot.writeString(struct.key); + } + if (struct.isSetValue()) { + oprot.writeString(struct.value); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } + if (incoming.get(1)) { + struct.value = iprot.readString(); + struct.setValueIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_result"); + + private static final org.apache.thrift.protocol.TField 
O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new setMetaConf_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new setMetaConf_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_result.class, metaDataMap); + } + + public setMetaConf_result() { + } + + public setMetaConf_result( + MetaException o1) + { + this(); + this.o1 = o1; + } + + /** + * Performs a deep copy on other. 
+ */ + public setMetaConf_result(setMetaConf_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public setMetaConf_result deepCopy() { + return new setMetaConf_result(this); + } + + @Override + public void clear() { + this.o1 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof setMetaConf_result) + return this.equals((setMetaConf_result)that); + return false; + } + + public boolean equals(setMetaConf_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(setMetaConf_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("setMetaConf_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void 
writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setMetaConf_resultStandardSchemeFactory implements SchemeFactory { + public setMetaConf_resultStandardScheme getScheme() { + return new setMetaConf_resultStandardScheme(); + } + } + + private static class setMetaConf_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setMetaConf_resultTupleSchemeFactory implements SchemeFactory { + public setMetaConf_resultTupleScheme getScheme() { + return new setMetaConf_resultTupleScheme(); + } + } + + private static class setMetaConf_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_catalog_args"); + + private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new 
org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_catalog_argsTupleSchemeFactory()); + } + + private CreateCatalogRequest catalog; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CATALOG((short)1, "catalog"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CATALOG + return CATALOG; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CreateCatalogRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_catalog_args.class, metaDataMap); + } + + public create_catalog_args() { + } + + public create_catalog_args( + CreateCatalogRequest catalog) + { + this(); + this.catalog = catalog; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_catalog_args(create_catalog_args other) { + if (other.isSetCatalog()) { + this.catalog = new CreateCatalogRequest(other.catalog); + } + } + + public create_catalog_args deepCopy() { + return new create_catalog_args(this); + } + + @Override + public void clear() { + this.catalog = null; + } + + public CreateCatalogRequest getCatalog() { + return this.catalog; + } + + public void setCatalog(CreateCatalogRequest catalog) { + this.catalog = catalog; + } + + public void unsetCatalog() { + this.catalog = null; + } + + /** Returns true if field catalog is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalog() { + return this.catalog != null; + } + + public void setCatalogIsSet(boolean value) { + if (!value) { + this.catalog = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CATALOG: + if (value == null) { + unsetCatalog(); + } else { + setCatalog((CreateCatalogRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CATALOG: + return getCatalog(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CATALOG: + return isSetCatalog(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_catalog_args) + return this.equals((create_catalog_args)that); + return false; + } + + public boolean equals(create_catalog_args that) { + if (that == null) + return false; + + boolean this_present_catalog = true && this.isSetCatalog(); + boolean that_present_catalog = true && that.isSetCatalog(); + if (this_present_catalog || that_present_catalog) { + if (!(this_present_catalog && that_present_catalog)) + return false; + if (!this.catalog.equals(that.catalog)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catalog = true && (isSetCatalog()); + list.add(present_catalog); + if (present_catalog) + list.add(catalog); + + return list.hashCode(); + } + + @Override + public int compareTo(create_catalog_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalog()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_catalog_args("); + boolean first = true; + + sb.append("catalog:"); + if (this.catalog == null) { + sb.append("null"); + } else { + 
sb.append(this.catalog); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catalog != null) { + catalog.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_catalog_argsStandardSchemeFactory implements SchemeFactory { + public create_catalog_argsStandardScheme getScheme() { + return new create_catalog_argsStandardScheme(); + } + } + + private static class create_catalog_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_catalog_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CATALOG + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catalog = new CreateCatalogRequest(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_catalog_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catalog != null) { + oprot.writeFieldBegin(CATALOG_FIELD_DESC); + struct.catalog.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_catalog_argsTupleSchemeFactory implements SchemeFactory { + public create_catalog_argsTupleScheme getScheme() { + return new create_catalog_argsTupleScheme(); + } + } + + private static class create_catalog_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatalog()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatalog()) { + struct.catalog.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catalog = new CreateCatalogRequest(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_catalog_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_catalog_resultTupleSchemeFactory()); + } + + private AlreadyExistsException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_catalog_result.class, metaDataMap); + } + + public create_catalog_result() { + } + + public create_catalog_result( + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public create_catalog_result(create_catalog_result other) { + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public create_catalog_result deepCopy() { + return new create_catalog_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) 
{ + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_catalog_result) + return this.equals((create_catalog_result)that); + return false; + } + + public boolean equals(create_catalog_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(create_catalog_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } 
+ } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_catalog_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_catalog_resultStandardSchemeFactory implements SchemeFactory { + public create_catalog_resultStandardScheme getScheme() { + return new create_catalog_resultStandardScheme(); + } + } + + private static class create_catalog_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_catalog_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol 
oprot, create_catalog_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_catalog_resultTupleSchemeFactory implements SchemeFactory { + public create_catalog_resultTupleScheme getScheme() { + return new create_catalog_resultTupleScheme(); + } + } + + private static class create_catalog_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(2)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalog_args"); + + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalog_argsTupleSchemeFactory()); + } + + private GetCatalogRequest catName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CAT_NAME((short)1, "catName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CAT_NAME + return CAT_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetCatalogRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalog_args.class, metaDataMap); + } + + public get_catalog_args() { + } + + public get_catalog_args( + GetCatalogRequest catName) + { + this(); + this.catName = catName; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_catalog_args(get_catalog_args other) { + if (other.isSetCatName()) { + this.catName = new GetCatalogRequest(other.catName); + } + } + + public get_catalog_args deepCopy() { + return new get_catalog_args(this); + } + + @Override + public void clear() { + this.catName = null; + } + + public GetCatalogRequest getCatName() { + return this.catName; + } + + public void setCatName(GetCatalogRequest catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((GetCatalogRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CAT_NAME: + return getCatName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CAT_NAME: + return isSetCatName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalog_args) + return this.equals((get_catalog_args)that); + return false; + } + + public boolean equals(get_catalog_args that) { + if (that == null) + return false; + + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalog_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalog_args("); + boolean first = true; + + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first 
= false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catName != null) { + catName.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalog_argsStandardSchemeFactory implements SchemeFactory { + public get_catalog_argsStandardScheme getScheme() { + return new get_catalog_argsStandardScheme(); + } + } + + private static class get_catalog_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalog_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catName = new GetCatalogRequest(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalog_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + struct.catName.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalog_argsTupleSchemeFactory implements SchemeFactory { + public get_catalog_argsTupleScheme getScheme() { + return new get_catalog_argsTupleScheme(); + } + } + + private static class get_catalog_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + struct.catName.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = new GetCatalogRequest(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
get_catalog_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalog_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalog_resultTupleSchemeFactory()); + } + + private GetCatalogResponse success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetCatalogResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalog_result.class, metaDataMap); + } + + public get_catalog_result() { + } + + public get_catalog_result( + GetCatalogResponse success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_catalog_result(get_catalog_result other) { + if (other.isSetSuccess()) { + this.success = new GetCatalogResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_catalog_result deepCopy() { + return new get_catalog_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public GetCatalogResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetCatalogResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetCatalogResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalog_result) + return this.equals((get_catalog_result)that); + return false; + } + + public boolean equals(get_catalog_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if 
(this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalog_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalog_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + 
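The writeObject/readObject pair that the generator emits for each struct delegates plain Java serialization to the compact Thrift encoding, and the same public read/write entry points round-trip a struct over any transport. A minimal sketch of an in-memory round trip, assuming the nested get_catalog_result type is in scope (its enclosing generated service class lies outside this hunk) and using only calls visible above:

    // Hypothetical helper, not part of the commit: write a result struct into a
    // byte buffer with TCompactProtocol, then read it back into a fresh instance.
    static void roundTrip() throws org.apache.thrift.TException {
      java.io.ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
      get_catalog_result out = new get_catalog_result();
      out.setO2(new MetaException());  // only non-null fields are written by the standard scheme
      out.write(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(buf)));

      get_catalog_result in = new get_catalog_result();
      in.read(new org.apache.thrift.protocol.TCompactProtocol(
          new org.apache.thrift.transport.TIOStreamTransport(
              new java.io.ByteArrayInputStream(buf.toByteArray()))));
      assert in.isSetO2() && !in.isSetSuccess();  // only assigned fields survive the trip
    }
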
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalog_resultStandardSchemeFactory implements SchemeFactory { + public get_catalog_resultStandardScheme getScheme() { + return new get_catalog_resultStandardScheme(); + } + } + + private static class get_catalog_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalog_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetCatalogResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalog_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalog_resultTupleSchemeFactory implements SchemeFactory { + public get_catalog_resultTupleScheme getScheme() { + return new get_catalog_resultTupleScheme(); + } + } + + private static class get_catalog_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void 
read(org.apache.thrift.protocol.TProtocol prot, get_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = new GetCatalogResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalogs_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalogs_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalogs_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalogs_args.class, metaDataMap); + } + + public get_catalogs_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public get_catalogs_args(get_catalogs_args other) { + } + + public get_catalogs_args deepCopy() { + return new get_catalogs_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalogs_args) + return this.equals((get_catalogs_args)that); + return false; + } + + public boolean equals(get_catalogs_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalogs_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalogs_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalogs_argsStandardSchemeFactory implements SchemeFactory { + public get_catalogs_argsStandardScheme getScheme() { + return new get_catalogs_argsStandardScheme(); + } + } + + private static class get_catalogs_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalogs_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_catalogs_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalogs_argsTupleSchemeFactory implements SchemeFactory { + public get_catalogs_argsTupleScheme getScheme() { + return new get_catalogs_argsTupleScheme(); + } + } + + private static class get_catalogs_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalogs_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalogs_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalogs_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalogs_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalogs_resultTupleSchemeFactory()); + } + + private GetCatalogsResponse success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetCatalogsResponse.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalogs_result.class, metaDataMap); + } + + public get_catalogs_result() { + } + + public get_catalogs_result( + GetCatalogsResponse success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_catalogs_result(get_catalogs_result other) { + if (other.isSetSuccess()) { + this.success = new GetCatalogsResponse(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public get_catalogs_result deepCopy() { + return new get_catalogs_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public GetCatalogsResponse getSuccess() { + return this.success; + } + + public void setSuccess(GetCatalogsResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetCatalogsResponse)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) 
{ + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalogs_result) + return this.equals((get_catalogs_result)that); + return false; + } + + public boolean equals(get_catalogs_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalogs_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalogs_result("); boolean first = true; sb.append("success:"); @@ -31464,6 +35491,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -31482,15 +35512,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getMetaConf_resultStandardSchemeFactory implements SchemeFactory { - public getMetaConf_resultStandardScheme getScheme() { - return new getMetaConf_resultStandardScheme(); + private static class get_catalogs_resultStandardSchemeFactory implements SchemeFactory { + public get_catalogs_resultStandardScheme getScheme() { + return new 
get_catalogs_resultStandardScheme(); } } - private static class getMetaConf_resultStandardScheme extends StandardScheme { + private static class get_catalogs_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalogs_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -31501,8 +35531,9 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.success = iprot.readString(); + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetCatalogsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -31526,13 +35557,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalogs_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(struct.success); + struct.success.write(oprot); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -31546,16 +35577,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result } - private static class getMetaConf_resultTupleSchemeFactory implements SchemeFactory { - public getMetaConf_resultTupleScheme getScheme() { - return new getMetaConf_resultTupleScheme(); + private static class get_catalogs_resultTupleSchemeFactory implements SchemeFactory { + public get_catalogs_resultTupleScheme getScheme() { + return new get_catalogs_resultTupleScheme(); } } - private static class getMetaConf_resultTupleScheme extends TupleScheme { + private static class get_catalogs_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalogs_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -31566,7 +35597,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result } oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - oprot.writeString(struct.success); + struct.success.write(oprot); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -31574,11 +35605,12 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalogs_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = iprot.readString(); + struct.success = new 
GetCatalogsResponse(); + struct.success.read(iprot); struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -31591,25 +35623,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result s } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_catalog_args"); - private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new setMetaConf_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new setMetaConf_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_catalog_argsTupleSchemeFactory()); } - private String key; // required - private String value; // required + private DropCatalogRequest catName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY((short)1, "key"), - VALUE((short)2, "value"); + CAT_NAME((short)1, "catName"); private static final Map byName = new HashMap(); @@ -31624,10 +35653,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result s */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // KEY - return KEY; - case 2: // VALUE - return VALUE; + case 1: // CAT_NAME + return CAT_NAME; default: return null; } @@ -31671,109 +35698,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropCatalogRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_catalog_args.class, metaDataMap); } - public setMetaConf_args() { + public drop_catalog_args() { } - public setMetaConf_args( - String key, - String value) + public drop_catalog_args( + DropCatalogRequest catName) { this(); - this.key = key; - this.value = value; + this.catName = catName; } /** * Performs a deep copy on other. 
*/ - public setMetaConf_args(setMetaConf_args other) { - if (other.isSetKey()) { - this.key = other.key; - } - if (other.isSetValue()) { - this.value = other.value; + public drop_catalog_args(drop_catalog_args other) { + if (other.isSetCatName()) { + this.catName = new DropCatalogRequest(other.catName); } } - public setMetaConf_args deepCopy() { - return new setMetaConf_args(this); + public drop_catalog_args deepCopy() { + return new drop_catalog_args(this); } @Override public void clear() { - this.key = null; - this.value = null; + this.catName = null; } - public String getKey() { - return this.key; + public DropCatalogRequest getCatName() { + return this.catName; } - public void setKey(String key) { - this.key = key; + public void setCatName(DropCatalogRequest catName) { + this.catName = catName; } - public void unsetKey() { - this.key = null; + public void unsetCatName() { + this.catName = null; } - /** Returns true if field key is set (has been assigned a value) and false otherwise */ - public boolean isSetKey() { - return this.key != null; + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; } - public void setKeyIsSet(boolean value) { + public void setCatNameIsSet(boolean value) { if (!value) { - this.key = null; - } - } - - public String getValue() { - return this.value; - } - - public void setValue(String value) { - this.value = value; - } - - public void unsetValue() { - this.value = null; - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return this.value != null; - } - - public void setValueIsSet(boolean value) { - if (!value) { - this.value = null; + this.catName = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case KEY: + case CAT_NAME: if (value == null) { - unsetKey(); + unsetCatName(); } else { - setKey((String)value); - } - break; - - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((String)value); + setCatName((DropCatalogRequest)value); } break; @@ -31782,11 +35770,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case KEY: - return getKey(); - - case VALUE: - return getValue(); + case CAT_NAME: + return getCatName(); } throw new IllegalStateException(); @@ -31799,10 +35784,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case KEY: - return isSetKey(); - case VALUE: - return isSetValue(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -31811,30 +35794,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof setMetaConf_args) - return this.equals((setMetaConf_args)that); + if (that instanceof drop_catalog_args) + return this.equals((drop_catalog_args)that); return false; } - public boolean equals(setMetaConf_args that) { + public boolean equals(drop_catalog_args that) { if (that == null) return false; - boolean this_present_key = true && this.isSetKey(); - boolean that_present_key = true && that.isSetKey(); - if (this_present_key || that_present_key) { - if (!(this_present_key && that_present_key)) + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && 
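The copy constructor above also changes character: the old String fields were simply aliased (strings are immutable), while the struct-typed catName is cloned, which is what keeps deepCopy() results isolated from the original:

    // Sketch of the contrast. Immutable values can be shared safely;
    // mutable sub-structs must be cloned so mutating the copy cannot
    // leak back into the source object.
    if (other.isSetCatName()) {
      this.catName = new DropCatalogRequest(other.catName);   // clone, not alias
    }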
that_present_catName)) return false; - if (!this.key.equals(that.key)) - return false; - } - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (!this.value.equals(that.value)) + if (!this.catName.equals(that.catName)) return false; } @@ -31845,43 +35819,28 @@ public boolean equals(setMetaConf_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_key = true && (isSetKey()); - list.add(present_key); - if (present_key) - list.add(key); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); return list.hashCode(); } @Override - public int compareTo(setMetaConf_args other) { + public int compareTo(drop_catalog_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); if (lastComparison != 0) { return lastComparison; } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); if (lastComparison != 0) { return lastComparison; } @@ -31903,22 +35862,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
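The equals/hashCode/compareTo bodies here keep the stock generated pattern: compare presence flags first, and only consult the values when both sides actually have the field set. Reduced to a standalone helper for illustration (not part of the patch):

    // Generated-equality idiom in miniature: a field contributes equality
    // only if its set-ness matches on both sides and, when both are set,
    // the values agree via the field's own equals().
    static boolean fieldEquals(Object mine, Object theirs) {
      boolean minePresent = mine != null;        // isSetCatName() analogue
      boolean theirsPresent = theirs != null;
      if (minePresent || theirsPresent) {
        if (!(minePresent && theirsPresent)) return false;
        return mine.equals(theirs);
      }
      return true;                               // both unset: equal
    }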
@Override public String toString() { - StringBuilder sb = new StringBuilder("setMetaConf_args("); + StringBuilder sb = new StringBuilder("drop_catalog_args("); boolean first = true; - sb.append("key:"); - if (this.key == null) { + sb.append("catName:"); + if (this.catName == null) { sb.append("null"); } else { - sb.append(this.key); - } - first = false; - if (!first) sb.append(", "); - sb.append("value:"); - if (this.value == null) { - sb.append("null"); - } else { - sb.append(this.value); + sb.append(this.catName); } first = false; sb.append(")"); @@ -31928,6 +35879,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (catName != null) { + catName.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -31946,15 +35900,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class setMetaConf_argsStandardSchemeFactory implements SchemeFactory { - public setMetaConf_argsStandardScheme getScheme() { - return new setMetaConf_argsStandardScheme(); + private static class drop_catalog_argsStandardSchemeFactory implements SchemeFactory { + public drop_catalog_argsStandardScheme getScheme() { + return new drop_catalog_argsStandardScheme(); } } - private static class setMetaConf_argsStandardScheme extends StandardScheme { + private static class drop_catalog_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_catalog_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -31964,18 +35918,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args st break; } switch (schemeField.id) { - case 1: // KEY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catName = new DropCatalogRequest(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -31989,18 +35936,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_catalog_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.key != null) { - oprot.writeFieldBegin(KEY_FIELD_DESC); - oprot.writeString(struct.key); - oprot.writeFieldEnd(); - } - if (struct.value != null) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeString(struct.value); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + struct.catName.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -32009,66 +35951,63 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args s } - private static class setMetaConf_argsTupleSchemeFactory implements SchemeFactory { - public setMetaConf_argsTupleScheme getScheme() { - return new setMetaConf_argsTupleScheme(); + private static class drop_catalog_argsTupleSchemeFactory implements SchemeFactory { + public drop_catalog_argsTupleScheme getScheme() { + return new drop_catalog_argsTupleScheme(); } } - private static class setMetaConf_argsTupleScheme extends TupleScheme { + private static class drop_catalog_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_catalog_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetKey()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetValue()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetKey()) { - oprot.writeString(struct.key); - } - if (struct.isSetValue()) { - oprot.writeString(struct.value); + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + struct.catName.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_catalog_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } - if (incoming.get(1)) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); + struct.catName = new DropCatalogRequest(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_catalog_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new setMetaConf_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new setMetaConf_resultTupleSchemeFactory()); + 
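The tuple scheme just shown is why the optionals BitSet shrinks from two bits to one: the tuple protocol writes a leading bitset naming which fields follow and then only those payloads, so the declared width must match the struct's field count exactly. The write half, restated with the framing made explicit:

    // Tuple-protocol framing in miniature: one presence bit per field,
    // then only the payloads whose bits are set. This mirrors the inner
    // TupleScheme class above; it is a sketch, not new API.
    void writeTuple(org.apache.thrift.protocol.TTupleProtocol oprot,
                    drop_catalog_args struct) throws org.apache.thrift.TException {
      java.util.BitSet optionals = new java.util.BitSet();
      if (struct.isSetCatName()) {
        optionals.set(0);
      }
      oprot.writeBitSet(optionals, 1);   // one field, one presence bit
      if (struct.isSetCatName()) {
        struct.catName.write(oprot);     // struct payload, not a plain string
      }
    }

The matching read side does readBitSet(1) and reconstructs the DropCatalogRequest only when bit 0 is set, exactly as in the hunk above.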
schemes.put(StandardScheme.class, new drop_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_catalog_resultTupleSchemeFactory()); } - private MetaException o1; // required + private NoSuchObjectException o1; // required + private InvalidOperationException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"); + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -32085,6 +36024,10 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; default: return null; } @@ -32130,43 +36073,59 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_catalog_result.class, metaDataMap); } - public setMetaConf_result() { + public drop_catalog_result() { } - public setMetaConf_result( - MetaException o1) + public drop_catalog_result( + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) { this(); this.o1 = o1; + this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public setMetaConf_result(setMetaConf_result other) { + public drop_catalog_result(drop_catalog_result other) { if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } - public setMetaConf_result deepCopy() { - return new setMetaConf_result(this); + public drop_catalog_result deepCopy() { + return new drop_catalog_result(this); } @Override public void clear() { this.o1 = null; + this.o2 = null; + this.o3 = null; } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -32185,13 +36144,75 @@ public void setO1IsSet(boolean value) { } } + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -32203,6 +36224,12 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); + case O2: + return getO2(); + + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -32216,6 +36243,10 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -32224,12 +36255,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof setMetaConf_result) - return this.equals((setMetaConf_result)that); + if (that instanceof drop_catalog_result) + return this.equals((drop_catalog_result)that); return false; } - public boolean equals(setMetaConf_result that) { + public boolean equals(drop_catalog_result that) { if (that == null) return false; @@ -32242,6 +36273,24 @@ public boolean equals(setMetaConf_result that) { return false; } + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 
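drop_catalog_result widening from one exception slot to three reflects how Thrift moves exceptions: there is no exception channel on the wire, so each declared exception rides as an optional field of the result struct (o1 NoSuchObjectException, o2 InvalidOperationException, o3 MetaException), and the client re-throws whichever one arrives set. A sketch of that client-side demux, assuming the stock generated recv_drop_catalog shape, which is not part of this hunk:

    // Inside a method declaring the three exceptions. The transport read
    // (receiveBase) is elided; only the demux contract is shown.
    drop_catalog_result result = new drop_catalog_result();
    // receiveBase(result, "drop_catalog");        // plumbing elided
    if (result.isSetO1()) throw result.getO1();    // NoSuchObjectException
    if (result.isSetO2()) throw result.getO2();    // InvalidOperationException
    if (result.isSetO3()) throw result.getO3();    // MetaException
    // drop_catalog is void: nothing set means success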
&& that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -32254,11 +36303,21 @@ public int hashCode() { if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(setMetaConf_result other) { + public int compareTo(drop_catalog_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -32275,6 +36334,26 @@ public int compareTo(setMetaConf_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -32292,7 +36371,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("setMetaConf_result("); + StringBuilder sb = new StringBuilder("drop_catalog_result("); boolean first = true; sb.append("o1:"); @@ -32302,6 +36381,22 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -32327,15 +36422,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class setMetaConf_resultStandardSchemeFactory implements SchemeFactory { - public setMetaConf_resultStandardScheme getScheme() { - return new setMetaConf_resultStandardScheme(); + private static class drop_catalog_resultStandardSchemeFactory implements SchemeFactory { + public drop_catalog_resultStandardScheme getScheme() { + return new drop_catalog_resultStandardScheme(); } } - private static class setMetaConf_resultStandardScheme extends StandardScheme { + private static class drop_catalog_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_catalog_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -32347,13 +36442,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result switch (schemeField.id) { case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -32363,7 +36476,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_catalog_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -32372,42 +36485,74 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class setMetaConf_resultTupleSchemeFactory implements SchemeFactory { - public setMetaConf_resultTupleScheme getScheme() { - return new setMetaConf_resultTupleScheme(); + private static class drop_catalog_resultTupleSchemeFactory implements SchemeFactory { + public drop_catalog_resultTupleScheme getScheme() { + return new drop_catalog_resultTupleScheme(); } } - private static class setMetaConf_resultTupleScheme extends TupleScheme { + private static class drop_catalog_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_catalog_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO2()) { + optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_catalog_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(1)) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(2)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } @@ 
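The server half of the same contract is worth sketching too, since the generated Processor for drop_catalog falls outside this hunk: the handler's declared exceptions are caught and filed into the result struct rather than escaping the transport. A sketch under that assumption (the Iface signature taking a DropCatalogRequest is inferred, not shown here):

    // Assumed Processor shape: run the handler, capture declared
    // exceptions into the numbered result slots the client will demux.
    drop_catalog_result result = new drop_catalog_result();
    try {
      iface.drop_catalog(args.getCatName());   // getCatName() is the DropCatalogRequest
    } catch (NoSuchObjectException o1) {
      result.setO1(o1);
    } catch (InvalidOperationException o2) {
      result.setO2(o2);
    } catch (MetaException o3) {
      result.setO3(o3);
    }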
-36162,13 +40307,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); - struct.success = new ArrayList(_list896.size); - String _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.success = new ArrayList(_list904.size); + String _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem897 = iprot.readString(); - struct.success.add(_elem897); + _elem905 = iprot.readString(); + struct.success.add(_elem905); } iprot.readListEnd(); } @@ -36203,9 +40348,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter899 : struct.success) + for (String _iter907 : struct.success) { - oprot.writeString(_iter899); + oprot.writeString(_iter907); } oprot.writeListEnd(); } @@ -36244,9 +40389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter900 : struct.success) + for (String _iter908 : struct.success) { - oprot.writeString(_iter900); + oprot.writeString(_iter908); } } } @@ -36261,13 +40406,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list901.size); - String _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list909.size); + String _elem910; + for (int _i911 = 0; _i911 < _list909.size; ++_i911) { - _elem902 = iprot.readString(); - struct.success.add(_elem902); + _elem910 = iprot.readString(); + struct.success.add(_elem910); } } struct.setSuccessIsSet(true); @@ -36921,13 +41066,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.success = new ArrayList(_list904.size); - String _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); + struct.success = new ArrayList(_list912.size); + String _elem913; + for (int _i914 = 0; _i914 < _list912.size; ++_i914) { - _elem905 = iprot.readString(); - struct.success.add(_elem905); + _elem913 = iprot.readString(); + struct.success.add(_elem913); } iprot.readListEnd(); } @@ -36962,9 +41107,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter907 : struct.success) + for (String _iter915 : struct.success) { - oprot.writeString(_iter907); + oprot.writeString(_iter915); } oprot.writeListEnd(); } @@ 
-37003,9 +41148,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter908 : struct.success) + for (String _iter916 : struct.success) { - oprot.writeString(_iter908); + oprot.writeString(_iter916); } } } @@ -37020,13 +41165,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list909.size); - String _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list917.size); + String _elem918; + for (int _i919 = 0; _i919 < _list917.size; ++_i919) { - _elem910 = iprot.readString(); - struct.success.add(_elem910); + _elem918 = iprot.readString(); + struct.success.add(_elem918); } } struct.setSuccessIsSet(true); @@ -41633,16 +45778,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map912 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map912.size); - String _key913; - Type _val914; - for (int _i915 = 0; _i915 < _map912.size; ++_i915) + org.apache.thrift.protocol.TMap _map920 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map920.size); + String _key921; + Type _val922; + for (int _i923 = 0; _i923 < _map920.size; ++_i923) { - _key913 = iprot.readString(); - _val914 = new Type(); - _val914.read(iprot); - struct.success.put(_key913, _val914); + _key921 = iprot.readString(); + _val922 = new Type(); + _val922.read(iprot); + struct.success.put(_key921, _val922); } iprot.readMapEnd(); } @@ -41677,10 +45822,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter916 : struct.success.entrySet()) + for (Map.Entry _iter924 : struct.success.entrySet()) { - oprot.writeString(_iter916.getKey()); - _iter916.getValue().write(oprot); + oprot.writeString(_iter924.getKey()); + _iter924.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -41719,10 +45864,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter917 : struct.success.entrySet()) + for (Map.Entry _iter925 : struct.success.entrySet()) { - oprot.writeString(_iter917.getKey()); - _iter917.getValue().write(oprot); + oprot.writeString(_iter925.getKey()); + _iter925.getValue().write(oprot); } } } @@ -41737,16 +45882,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map918 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map918.size); - String _key919; - Type _val920; - for (int _i921 = 0; 
_i921 < _map918.size; ++_i921) + org.apache.thrift.protocol.TMap _map926 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map926.size); + String _key927; + Type _val928; + for (int _i929 = 0; _i929 < _map926.size; ++_i929) { - _key919 = iprot.readString(); - _val920 = new Type(); - _val920.read(iprot); - struct.success.put(_key919, _val920); + _key927 = iprot.readString(); + _val928 = new Type(); + _val928.read(iprot); + struct.success.put(_key927, _val928); } } struct.setSuccessIsSet(true); @@ -42781,14 +46926,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); - struct.success = new ArrayList(_list922.size); - FieldSchema _elem923; - for (int _i924 = 0; _i924 < _list922.size; ++_i924) + org.apache.thrift.protocol.TList _list930 = iprot.readListBegin(); + struct.success = new ArrayList(_list930.size); + FieldSchema _elem931; + for (int _i932 = 0; _i932 < _list930.size; ++_i932) { - _elem923 = new FieldSchema(); - _elem923.read(iprot); - struct.success.add(_elem923); + _elem931 = new FieldSchema(); + _elem931.read(iprot); + struct.success.add(_elem931); } iprot.readListEnd(); } @@ -42841,9 +46986,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter925 : struct.success) + for (FieldSchema _iter933 : struct.success) { - _iter925.write(oprot); + _iter933.write(oprot); } oprot.writeListEnd(); } @@ -42898,9 +47043,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter926 : struct.success) + for (FieldSchema _iter934 : struct.success) { - _iter926.write(oprot); + _iter934.write(oprot); } } } @@ -42921,14 +47066,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list927 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list927.size); - FieldSchema _elem928; - for (int _i929 = 0; _i929 < _list927.size; ++_i929) + org.apache.thrift.protocol.TList _list935 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list935.size); + FieldSchema _elem936; + for (int _i937 = 0; _i937 < _list935.size; ++_i937) { - _elem928 = new FieldSchema(); - _elem928.read(iprot); - struct.success.add(_elem928); + _elem936 = new FieldSchema(); + _elem936.read(iprot); + struct.success.add(_elem936); } } struct.setSuccessIsSet(true); @@ -44082,14 +48227,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list930 = iprot.readListBegin(); - struct.success = new ArrayList(_list930.size); - FieldSchema _elem931; - for (int _i932 = 0; _i932 < _list930.size; ++_i932) + org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); 
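From get_databases onward this diff changes no behavior. The Thrift compiler numbers its scratch variables (_list, _elem, _i, _map, _key, _val, _iter) from a counter that runs through the whole generated file, so inserting the new catalog methods earlier in ThriftHiveMetastore.java shifts every later temporary by a constant offset of eight: _list896 becomes _list904, _elem897 becomes _elem905, and so on all the way through _list1018. For example:

    // Same generated statement before and after the shift; only the
    // counter suffix changes, the logic is byte-for-byte identical.
    _elem897 = iprot.readString(); struct.success.add(_elem897);   // before
    _elem905 = iprot.readString(); struct.success.add(_elem905);   // after (+8)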
+ struct.success = new ArrayList(_list938.size); + FieldSchema _elem939; + for (int _i940 = 0; _i940 < _list938.size; ++_i940) { - _elem931 = new FieldSchema(); - _elem931.read(iprot); - struct.success.add(_elem931); + _elem939 = new FieldSchema(); + _elem939.read(iprot); + struct.success.add(_elem939); } iprot.readListEnd(); } @@ -44142,9 +48287,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter933 : struct.success) + for (FieldSchema _iter941 : struct.success) { - _iter933.write(oprot); + _iter941.write(oprot); } oprot.writeListEnd(); } @@ -44199,9 +48344,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter934 : struct.success) + for (FieldSchema _iter942 : struct.success) { - _iter934.write(oprot); + _iter942.write(oprot); } } } @@ -44222,14 +48367,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list935 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list935.size); - FieldSchema _elem936; - for (int _i937 = 0; _i937 < _list935.size; ++_i937) + org.apache.thrift.protocol.TList _list943 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list943.size); + FieldSchema _elem944; + for (int _i945 = 0; _i945 < _list943.size; ++_i945) { - _elem936 = new FieldSchema(); - _elem936.read(iprot); - struct.success.add(_elem936); + _elem944 = new FieldSchema(); + _elem944.read(iprot); + struct.success.add(_elem944); } } struct.setSuccessIsSet(true); @@ -45274,14 +49419,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); - struct.success = new ArrayList(_list938.size); - FieldSchema _elem939; - for (int _i940 = 0; _i940 < _list938.size; ++_i940) + org.apache.thrift.protocol.TList _list946 = iprot.readListBegin(); + struct.success = new ArrayList(_list946.size); + FieldSchema _elem947; + for (int _i948 = 0; _i948 < _list946.size; ++_i948) { - _elem939 = new FieldSchema(); - _elem939.read(iprot); - struct.success.add(_elem939); + _elem947 = new FieldSchema(); + _elem947.read(iprot); + struct.success.add(_elem947); } iprot.readListEnd(); } @@ -45334,9 +49479,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter941 : struct.success) + for (FieldSchema _iter949 : struct.success) { - _iter941.write(oprot); + _iter949.write(oprot); } oprot.writeListEnd(); } @@ -45391,9 +49536,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter942 : struct.success) + for (FieldSchema _iter950 : struct.success) { - _iter942.write(oprot); + 
_iter950.write(oprot); } } } @@ -45414,14 +49559,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list943 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list943.size); - FieldSchema _elem944; - for (int _i945 = 0; _i945 < _list943.size; ++_i945) + org.apache.thrift.protocol.TList _list951 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list951.size); + FieldSchema _elem952; + for (int _i953 = 0; _i953 < _list951.size; ++_i953) { - _elem944 = new FieldSchema(); - _elem944.read(iprot); - struct.success.add(_elem944); + _elem952 = new FieldSchema(); + _elem952.read(iprot); + struct.success.add(_elem952); } } struct.setSuccessIsSet(true); @@ -46575,14 +50720,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list946 = iprot.readListBegin(); - struct.success = new ArrayList(_list946.size); - FieldSchema _elem947; - for (int _i948 = 0; _i948 < _list946.size; ++_i948) + org.apache.thrift.protocol.TList _list954 = iprot.readListBegin(); + struct.success = new ArrayList(_list954.size); + FieldSchema _elem955; + for (int _i956 = 0; _i956 < _list954.size; ++_i956) { - _elem947 = new FieldSchema(); - _elem947.read(iprot); - struct.success.add(_elem947); + _elem955 = new FieldSchema(); + _elem955.read(iprot); + struct.success.add(_elem955); } iprot.readListEnd(); } @@ -46635,9 +50780,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter949 : struct.success) + for (FieldSchema _iter957 : struct.success) { - _iter949.write(oprot); + _iter957.write(oprot); } oprot.writeListEnd(); } @@ -46692,9 +50837,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter950 : struct.success) + for (FieldSchema _iter958 : struct.success) { - _iter950.write(oprot); + _iter958.write(oprot); } } } @@ -46715,14 +50860,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list951 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list951.size); - FieldSchema _elem952; - for (int _i953 = 0; _i953 < _list951.size; ++_i953) + org.apache.thrift.protocol.TList _list959 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list959.size); + FieldSchema _elem960; + for (int _i961 = 0; _i961 < _list959.size; ++_i961) { - _elem952 = new FieldSchema(); - _elem952.read(iprot); - struct.success.add(_elem952); + _elem960 = new FieldSchema(); + _elem960.read(iprot); + struct.success.add(_elem960); } } struct.setSuccessIsSet(true); @@ -49851,14 +53996,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // 
PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list954 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list954.size); - SQLPrimaryKey _elem955; - for (int _i956 = 0; _i956 < _list954.size; ++_i956) + org.apache.thrift.protocol.TList _list962 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list962.size); + SQLPrimaryKey _elem963; + for (int _i964 = 0; _i964 < _list962.size; ++_i964) { - _elem955 = new SQLPrimaryKey(); - _elem955.read(iprot); - struct.primaryKeys.add(_elem955); + _elem963 = new SQLPrimaryKey(); + _elem963.read(iprot); + struct.primaryKeys.add(_elem963); } iprot.readListEnd(); } @@ -49870,14 +54015,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list957 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list957.size); - SQLForeignKey _elem958; - for (int _i959 = 0; _i959 < _list957.size; ++_i959) + org.apache.thrift.protocol.TList _list965 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list965.size); + SQLForeignKey _elem966; + for (int _i967 = 0; _i967 < _list965.size; ++_i967) { - _elem958 = new SQLForeignKey(); - _elem958.read(iprot); - struct.foreignKeys.add(_elem958); + _elem966 = new SQLForeignKey(); + _elem966.read(iprot); + struct.foreignKeys.add(_elem966); } iprot.readListEnd(); } @@ -49889,14 +54034,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list960.size); - SQLUniqueConstraint _elem961; - for (int _i962 = 0; _i962 < _list960.size; ++_i962) + org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list968.size); + SQLUniqueConstraint _elem969; + for (int _i970 = 0; _i970 < _list968.size; ++_i970) { - _elem961 = new SQLUniqueConstraint(); - _elem961.read(iprot); - struct.uniqueConstraints.add(_elem961); + _elem969 = new SQLUniqueConstraint(); + _elem969.read(iprot); + struct.uniqueConstraints.add(_elem969); } iprot.readListEnd(); } @@ -49908,14 +54053,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list963 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list963.size); - SQLNotNullConstraint _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list971 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list971.size); + SQLNotNullConstraint _elem972; + for (int _i973 = 0; _i973 < _list971.size; ++_i973) { - _elem964 = new SQLNotNullConstraint(); - _elem964.read(iprot); - struct.notNullConstraints.add(_elem964); + _elem972 = new SQLNotNullConstraint(); + _elem972.read(iprot); + struct.notNullConstraints.add(_elem972); } iprot.readListEnd(); } @@ -49927,14 +54072,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); - 
struct.defaultConstraints = new ArrayList(_list966.size); - SQLDefaultConstraint _elem967; - for (int _i968 = 0; _i968 < _list966.size; ++_i968) + org.apache.thrift.protocol.TList _list974 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list974.size); + SQLDefaultConstraint _elem975; + for (int _i976 = 0; _i976 < _list974.size; ++_i976) { - _elem967 = new SQLDefaultConstraint(); - _elem967.read(iprot); - struct.defaultConstraints.add(_elem967); + _elem975 = new SQLDefaultConstraint(); + _elem975.read(iprot); + struct.defaultConstraints.add(_elem975); } iprot.readListEnd(); } @@ -49946,14 +54091,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list969 = iprot.readListBegin(); - struct.checkConstraints = new ArrayList(_list969.size); - SQLCheckConstraint _elem970; - for (int _i971 = 0; _i971 < _list969.size; ++_i971) + org.apache.thrift.protocol.TList _list977 = iprot.readListBegin(); + struct.checkConstraints = new ArrayList(_list977.size); + SQLCheckConstraint _elem978; + for (int _i979 = 0; _i979 < _list977.size; ++_i979) { - _elem970 = new SQLCheckConstraint(); - _elem970.read(iprot); - struct.checkConstraints.add(_elem970); + _elem978 = new SQLCheckConstraint(); + _elem978.read(iprot); + struct.checkConstraints.add(_elem978); } iprot.readListEnd(); } @@ -49984,9 +54129,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter972 : struct.primaryKeys) + for (SQLPrimaryKey _iter980 : struct.primaryKeys) { - _iter972.write(oprot); + _iter980.write(oprot); } oprot.writeListEnd(); } @@ -49996,9 +54141,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter973 : struct.foreignKeys) + for (SQLForeignKey _iter981 : struct.foreignKeys) { - _iter973.write(oprot); + _iter981.write(oprot); } oprot.writeListEnd(); } @@ -50008,9 +54153,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter974 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter982 : struct.uniqueConstraints) { - _iter974.write(oprot); + _iter982.write(oprot); } oprot.writeListEnd(); } @@ -50020,9 +54165,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter975 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter983 : struct.notNullConstraints) { - _iter975.write(oprot); + _iter983.write(oprot); } oprot.writeListEnd(); } @@ -50032,9 +54177,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ 
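The create_table_with_constraints hunks around here renumber the write side of the same pattern. For readability, the list-write idiom they all share, using the default-constraints field as the example (the matching read loop is reconstructed at the end of this section):

    // Size-prefixed list of structs: declare element type and count,
    // write each element, close the list. Mirrors the hunks above.
    oprot.writeListBegin(new org.apache.thrift.protocol.TList(
        org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
    for (SQLDefaultConstraint c : struct.defaultConstraints) {
      c.write(oprot);
    }
    oprot.writeListEnd();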
oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter976 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter984 : struct.defaultConstraints) { - _iter976.write(oprot); + _iter984.write(oprot); } oprot.writeListEnd(); } @@ -50044,9 +54189,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter977 : struct.checkConstraints) + for (SQLCheckConstraint _iter985 : struct.checkConstraints) { - _iter977.write(oprot); + _iter985.write(oprot); } oprot.writeListEnd(); } @@ -50098,54 +54243,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter978 : struct.primaryKeys) + for (SQLPrimaryKey _iter986 : struct.primaryKeys) { - _iter978.write(oprot); + _iter986.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter979 : struct.foreignKeys) + for (SQLForeignKey _iter987 : struct.foreignKeys) { - _iter979.write(oprot); + _iter987.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter980 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter988 : struct.uniqueConstraints) { - _iter980.write(oprot); + _iter988.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter981 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter989 : struct.notNullConstraints) { - _iter981.write(oprot); + _iter989.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter982 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter990 : struct.defaultConstraints) { - _iter982.write(oprot); + _iter990.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter983 : struct.checkConstraints) + for (SQLCheckConstraint _iter991 : struct.checkConstraints) { - _iter983.write(oprot); + _iter991.write(oprot); } } } @@ -50162,84 +54307,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list984 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list984.size); - SQLPrimaryKey _elem985; - for (int _i986 = 0; _i986 < _list984.size; ++_i986) + org.apache.thrift.protocol.TList _list992 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list992.size); + SQLPrimaryKey _elem993; + for (int _i994 = 0; _i994 < _list992.size; ++_i994) { - _elem985 = new SQLPrimaryKey(); - _elem985.read(iprot); - struct.primaryKeys.add(_elem985); + _elem993 = new SQLPrimaryKey(); + _elem993.read(iprot); + struct.primaryKeys.add(_elem993); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - 
org.apache.thrift.protocol.TList _list987 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list987.size); - SQLForeignKey _elem988; - for (int _i989 = 0; _i989 < _list987.size; ++_i989) + org.apache.thrift.protocol.TList _list995 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list995.size); + SQLForeignKey _elem996; + for (int _i997 = 0; _i997 < _list995.size; ++_i997) { - _elem988 = new SQLForeignKey(); - _elem988.read(iprot); - struct.foreignKeys.add(_elem988); + _elem996 = new SQLForeignKey(); + _elem996.read(iprot); + struct.foreignKeys.add(_elem996); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list990 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list990.size); - SQLUniqueConstraint _elem991; - for (int _i992 = 0; _i992 < _list990.size; ++_i992) + org.apache.thrift.protocol.TList _list998 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list998.size); + SQLUniqueConstraint _elem999; + for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000) { - _elem991 = new SQLUniqueConstraint(); - _elem991.read(iprot); - struct.uniqueConstraints.add(_elem991); + _elem999 = new SQLUniqueConstraint(); + _elem999.read(iprot); + struct.uniqueConstraints.add(_elem999); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list993 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list993.size); - SQLNotNullConstraint _elem994; - for (int _i995 = 0; _i995 < _list993.size; ++_i995) + org.apache.thrift.protocol.TList _list1001 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list1001.size); + SQLNotNullConstraint _elem1002; + for (int _i1003 = 0; _i1003 < _list1001.size; ++_i1003) { - _elem994 = new SQLNotNullConstraint(); - _elem994.read(iprot); - struct.notNullConstraints.add(_elem994); + _elem1002 = new SQLNotNullConstraint(); + _elem1002.read(iprot); + struct.notNullConstraints.add(_elem1002); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list996 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list996.size); - SQLDefaultConstraint _elem997; - for (int _i998 = 0; _i998 < _list996.size; ++_i998) + org.apache.thrift.protocol.TList _list1004 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list1004.size); + SQLDefaultConstraint _elem1005; + for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) { - _elem997 = new SQLDefaultConstraint(); - _elem997.read(iprot); - struct.defaultConstraints.add(_elem997); + _elem1005 = new SQLDefaultConstraint(); + _elem1005.read(iprot); + struct.defaultConstraints.add(_elem1005); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); - struct.checkConstraints = new ArrayList(_list999.size); - SQLCheckConstraint _elem1000; - for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) + org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.checkConstraints = new ArrayList(_list1007.size); + SQLCheckConstraint _elem1008; + for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) { - _elem1000 = new SQLCheckConstraint(); - _elem1000.read(iprot); - struct.checkConstraints.add(_elem1000); + _elem1008 = new SQLCheckConstraint(); + _elem1008.read(iprot); + struct.checkConstraints.add(_elem1008); } } struct.setCheckConstraintsIsSet(true); @@ -59389,13 +63534,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list1002.size); - String _elem1003; - for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) + org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list1010.size); + String _elem1011; + for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) { - _elem1003 = iprot.readString(); - struct.partNames.add(_elem1003); + _elem1011 = iprot.readString(); + struct.partNames.add(_elem1011); } iprot.readListEnd(); } @@ -59431,9 +63576,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter1005 : struct.partNames) + for (String _iter1013 : struct.partNames) { - oprot.writeString(_iter1005); + oprot.writeString(_iter1013); } oprot.writeListEnd(); } @@ -59476,9 +63621,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter1006 : struct.partNames) + for (String _iter1014 : struct.partNames) { - oprot.writeString(_iter1006); + oprot.writeString(_iter1014); } } } @@ -59498,13 +63643,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list1007.size); - String _elem1008; - for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) + org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list1015.size); + String _elem1016; + for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) { - _elem1008 = iprot.readString(); - struct.partNames.add(_elem1008); + _elem1016 = iprot.readString(); + struct.partNames.add(_elem1016); } } struct.setPartNamesIsSet(true); @@ -60729,13 +64874,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); - struct.success = new ArrayList(_list1010.size); - String _elem1011; - for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) + 
org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); + struct.success = new ArrayList(_list1018.size); + String _elem1019; + for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) { - _elem1011 = iprot.readString(); - struct.success.add(_elem1011); + _elem1019 = iprot.readString(); + struct.success.add(_elem1019); } iprot.readListEnd(); } @@ -60770,9 +64915,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1013 : struct.success) + for (String _iter1021 : struct.success) { - oprot.writeString(_iter1013); + oprot.writeString(_iter1021); } oprot.writeListEnd(); } @@ -60811,9 +64956,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1014 : struct.success) + for (String _iter1022 : struct.success) { - oprot.writeString(_iter1014); + oprot.writeString(_iter1022); } } } @@ -60828,13 +64973,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1015.size); - String _elem1016; - for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) + org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1023.size); + String _elem1024; + for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) { - _elem1016 = iprot.readString(); - struct.success.add(_elem1016); + _elem1024 = iprot.readString(); + struct.success.add(_elem1024); } } struct.setSuccessIsSet(true); @@ -61808,13 +65953,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); - struct.success = new ArrayList(_list1018.size); - String _elem1019; - for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) + org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); + struct.success = new ArrayList(_list1026.size); + String _elem1027; + for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) { - _elem1019 = iprot.readString(); - struct.success.add(_elem1019); + _elem1027 = iprot.readString(); + struct.success.add(_elem1027); } iprot.readListEnd(); } @@ -61849,9 +65994,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1021 : struct.success) + for (String _iter1029 : struct.success) { - oprot.writeString(_iter1021); + oprot.writeString(_iter1029); } oprot.writeListEnd(); } @@ -61890,9 +66035,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1022 : struct.success) + for (String _iter1030 : struct.success) { - oprot.writeString(_iter1022); + oprot.writeString(_iter1030); } } } @@ 
-61907,13 +66052,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1023.size); - String _elem1024; - for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) + org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1031.size); + String _elem1032; + for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) { - _elem1024 = iprot.readString(); - struct.success.add(_elem1024); + _elem1032 = iprot.readString(); + struct.success.add(_elem1032); } } struct.setSuccessIsSet(true); @@ -62679,13 +66824,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); - struct.success = new ArrayList(_list1026.size); - String _elem1027; - for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) + org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); + struct.success = new ArrayList(_list1034.size); + String _elem1035; + for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) { - _elem1027 = iprot.readString(); - struct.success.add(_elem1027); + _elem1035 = iprot.readString(); + struct.success.add(_elem1035); } iprot.readListEnd(); } @@ -62720,9 +66865,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1029 : struct.success) + for (String _iter1037 : struct.success) { - oprot.writeString(_iter1029); + oprot.writeString(_iter1037); } oprot.writeListEnd(); } @@ -62761,9 +66906,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1030 : struct.success) + for (String _iter1038 : struct.success) { - oprot.writeString(_iter1030); + oprot.writeString(_iter1038); } } } @@ -62778,13 +66923,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1031.size); - String _elem1032; - for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) + org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1039.size); + String _elem1040; + for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) { - _elem1032 = iprot.readString(); - struct.success.add(_elem1032); + _elem1040 = iprot.readString(); + struct.success.add(_elem1040); } } struct.setSuccessIsSet(true); @@ -63289,13 +67434,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1034 = 
iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1034.size); - String _elem1035; - for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1042.size); + String _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1035 = iprot.readString(); - struct.tbl_types.add(_elem1035); + _elem1043 = iprot.readString(); + struct.tbl_types.add(_elem1043); } iprot.readListEnd(); } @@ -63331,9 +67476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1037 : struct.tbl_types) + for (String _iter1045 : struct.tbl_types) { - oprot.writeString(_iter1037); + oprot.writeString(_iter1045); } oprot.writeListEnd(); } @@ -63376,9 +67521,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1038 : struct.tbl_types) + for (String _iter1046 : struct.tbl_types) { - oprot.writeString(_iter1038); + oprot.writeString(_iter1046); } } } @@ -63398,13 +67543,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1039.size); - String _elem1040; - for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) + org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1047.size); + String _elem1048; + for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) { - _elem1040 = iprot.readString(); - struct.tbl_types.add(_elem1040); + _elem1048 = iprot.readString(); + struct.tbl_types.add(_elem1048); } } struct.setTbl_typesIsSet(true); @@ -63810,14 +67955,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); - struct.success = new ArrayList(_list1042.size); - TableMeta _elem1043; - for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) + org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); + struct.success = new ArrayList(_list1050.size); + TableMeta _elem1051; + for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) { - _elem1043 = new TableMeta(); - _elem1043.read(iprot); - struct.success.add(_elem1043); + _elem1051 = new TableMeta(); + _elem1051.read(iprot); + struct.success.add(_elem1051); } iprot.readListEnd(); } @@ -63852,9 +67997,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1045 : struct.success) + for (TableMeta _iter1053 : struct.success) { - _iter1045.write(oprot); + _iter1053.write(oprot); } oprot.writeListEnd(); } @@ -63893,9 +68038,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (TableMeta _iter1046 : struct.success) + for (TableMeta _iter1054 : struct.success) { - _iter1046.write(oprot); + _iter1054.write(oprot); } } } @@ -63910,14 +68055,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1047.size); - TableMeta _elem1048; - for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) + org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1055.size); + TableMeta _elem1056; + for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) { - _elem1048 = new TableMeta(); - _elem1048.read(iprot); - struct.success.add(_elem1048); + _elem1056 = new TableMeta(); + _elem1056.read(iprot); + struct.success.add(_elem1056); } } struct.setSuccessIsSet(true); @@ -64683,13 +68828,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); - struct.success = new ArrayList(_list1050.size); - String _elem1051; - for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) + org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); + struct.success = new ArrayList(_list1058.size); + String _elem1059; + for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) { - _elem1051 = iprot.readString(); - struct.success.add(_elem1051); + _elem1059 = iprot.readString(); + struct.success.add(_elem1059); } iprot.readListEnd(); } @@ -64724,9 +68869,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1053 : struct.success) + for (String _iter1061 : struct.success) { - oprot.writeString(_iter1053); + oprot.writeString(_iter1061); } oprot.writeListEnd(); } @@ -64765,9 +68910,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1054 : struct.success) + for (String _iter1062 : struct.success) { - oprot.writeString(_iter1054); + oprot.writeString(_iter1062); } } } @@ -64782,13 +68927,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1055.size); - String _elem1056; - for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) + org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1063.size); + String _elem1064; + for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) { - _elem1056 = iprot.readString(); - struct.success.add(_elem1056); + _elem1064 = iprot.readString(); + struct.success.add(_elem1064); } } struct.setSuccessIsSet(true); @@ -66241,13 
+70386,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1058.size); - String _elem1059; - for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) + org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1066.size); + String _elem1067; + for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) { - _elem1059 = iprot.readString(); - struct.tbl_names.add(_elem1059); + _elem1067 = iprot.readString(); + struct.tbl_names.add(_elem1067); } iprot.readListEnd(); } @@ -66278,9 +70423,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1061 : struct.tbl_names) + for (String _iter1069 : struct.tbl_names) { - oprot.writeString(_iter1061); + oprot.writeString(_iter1069); } oprot.writeListEnd(); } @@ -66317,9 +70462,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1062 : struct.tbl_names) + for (String _iter1070 : struct.tbl_names) { - oprot.writeString(_iter1062); + oprot.writeString(_iter1070); } } } @@ -66335,13 +70480,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1063.size); - String _elem1064; - for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) + org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1071.size); + String _elem1072; + for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) { - _elem1064 = iprot.readString(); - struct.tbl_names.add(_elem1064); + _elem1072 = iprot.readString(); + struct.tbl_names.add(_elem1072); } } struct.setTbl_namesIsSet(true); @@ -66666,14 +70811,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin(); - struct.success = new ArrayList
(_list1066.size); - Table _elem1067; - for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068) + org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); + struct.success = new ArrayList
(_list1074.size); + Table _elem1075; + for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) { - _elem1067 = new Table(); - _elem1067.read(iprot); - struct.success.add(_elem1067); + _elem1075 = new Table(); + _elem1075.read(iprot); + struct.success.add(_elem1075); } iprot.readListEnd(); } @@ -66699,9 +70844,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1069 : struct.success) + for (Table _iter1077 : struct.success) { - _iter1069.write(oprot); + _iter1077.write(oprot); } oprot.writeListEnd(); } @@ -66732,9 +70877,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1070 : struct.success) + for (Table _iter1078 : struct.success) { - _iter1070.write(oprot); + _iter1078.write(oprot); } } } @@ -66746,14 +70891,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list1071.size); - Table _elem1072; - for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073) + org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list1079.size); + Table _elem1080; + for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) { - _elem1072 = new Table(); - _elem1072.read(iprot); - struct.success.add(_elem1072); + _elem1080 = new Table(); + _elem1080.read(iprot); + struct.success.add(_elem1080); } } struct.setSuccessIsSet(true); @@ -69146,13 +73291,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1074.size); - String _elem1075; - for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076) + org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1082.size); + String _elem1083; + for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084) { - _elem1075 = iprot.readString(); - struct.tbl_names.add(_elem1075); + _elem1083 = iprot.readString(); + struct.tbl_names.add(_elem1083); } iprot.readListEnd(); } @@ -69183,9 +73328,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1077 : struct.tbl_names) + for (String _iter1085 : struct.tbl_names) { - oprot.writeString(_iter1077); + oprot.writeString(_iter1085); } oprot.writeListEnd(); } @@ -69222,9 +73367,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1078 : struct.tbl_names) + for (String _iter1086 : struct.tbl_names) { - oprot.writeString(_iter1078); + oprot.writeString(_iter1086); } } } @@ -69240,13 +73385,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1079.size); - String _elem1080; - for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081) + org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1087.size); + String _elem1088; + for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089) { - _elem1080 = iprot.readString(); - struct.tbl_names.add(_elem1080); + _elem1088 = iprot.readString(); + struct.tbl_names.add(_elem1088); } } struct.setTbl_namesIsSet(true); @@ -69819,16 +73964,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1082 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1082.size); - String _key1083; - Materialization _val1084; - for (int _i1085 = 0; _i1085 < _map1082.size; ++_i1085) + org.apache.thrift.protocol.TMap _map1090 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1090.size); + String _key1091; + Materialization _val1092; + for (int _i1093 = 0; _i1093 < _map1090.size; ++_i1093) { - _key1083 = iprot.readString(); - _val1084 = new Materialization(); - _val1084.read(iprot); - struct.success.put(_key1083, _val1084); + _key1091 = iprot.readString(); + _val1092 = new Materialization(); + 
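The get_materialization hunks above apply the same renumbering to the generated map-read pattern (_map1082 to _map1090 and friends). A minimal sketch of that pattern follows, again as a standalone illustration rather than patch code, assuming the Materialization API class from this module.

    // Illustrative sketch of the generated TMap read loop renumbered above.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.Materialization;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TProtocol;

    final class GeneratedMapReadSketch {
      // Read the map header, then one (string key, struct value) pair per
      // iteration; 2 * size presizes the HashMap to keep its load factor low.
      static Map<String, Materialization> readSuccess(TProtocol iprot) throws TException {
        TMap header = iprot.readMapBegin();
        Map<String, Materialization> success = new HashMap<String, Materialization>(2 * header.size);
        for (int i = 0; i < header.size; ++i) {
          String key = iprot.readString();
          Materialization val = new Materialization();
          val.read(iprot);
          success.put(key, val);
        }
        iprot.readMapEnd();
        return success;
      }
    }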
_val1092.read(iprot); + struct.success.put(_key1091, _val1092); } iprot.readMapEnd(); } @@ -69881,10 +74026,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter1086 : struct.success.entrySet()) + for (Map.Entry _iter1094 : struct.success.entrySet()) { - oprot.writeString(_iter1086.getKey()); - _iter1086.getValue().write(oprot); + oprot.writeString(_iter1094.getKey()); + _iter1094.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -69939,10 +74084,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1087 : struct.success.entrySet()) + for (Map.Entry _iter1095 : struct.success.entrySet()) { - oprot.writeString(_iter1087.getKey()); - _iter1087.getValue().write(oprot); + oprot.writeString(_iter1095.getKey()); + _iter1095.getValue().write(oprot); } } } @@ -69963,16 +74108,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1088 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map1088.size); - String _key1089; - Materialization _val1090; - for (int _i1091 = 0; _i1091 < _map1088.size; ++_i1091) + org.apache.thrift.protocol.TMap _map1096 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map1096.size); + String _key1097; + Materialization _val1098; + for (int _i1099 = 0; _i1099 < _map1096.size; ++_i1099) { - _key1089 = iprot.readString(); - _val1090 = new Materialization(); - _val1090.read(iprot); - struct.success.put(_key1089, _val1090); + _key1097 = iprot.readString(); + _val1098 = new Materialization(); + _val1098.read(iprot); + struct.success.put(_key1097, _val1098); } } struct.setSuccessIsSet(true); @@ -70000,9 +74145,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_creation_metadata_args"); - private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static 
final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -70010,15 +74156,17 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ schemes.put(TupleScheme.class, new update_creation_metadata_argsTupleSchemeFactory()); } + private String catName; // required private String dbname; // required private String tbl_name; // required private CreationMetadata creation_metadata; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DBNAME((short)1, "dbname"), - TBL_NAME((short)2, "tbl_name"), - CREATION_METADATA((short)3, "creation_metadata"); + CAT_NAME((short)1, "catName"), + DBNAME((short)2, "dbname"), + TBL_NAME((short)3, "tbl_name"), + CREATION_METADATA((short)4, "creation_metadata"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -70033,11 +74181,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DBNAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DBNAME return DBNAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; - case 3: // CREATION_METADATA + case 4: // CREATION_METADATA return CREATION_METADATA; default: return null; @@ -70082,6 +74232,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -70096,11 +74248,13 @@ public update_creation_metadata_args() { } public update_creation_metadata_args( + String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) { this(); + this.catName = catName; this.dbname = dbname; this.tbl_name = tbl_name; this.creation_metadata = creation_metadata; @@ -70110,6 +74264,9 @@ public update_creation_metadata_args( * Performs a deep copy on other. 
*/ public update_creation_metadata_args(update_creation_metadata_args other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbname()) { this.dbname = other.dbname; } @@ -70127,11 +74284,35 @@ public update_creation_metadata_args deepCopy() { @Override public void clear() { + this.catName = null; this.dbname = null; this.tbl_name = null; this.creation_metadata = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbname() { return this.dbname; } @@ -70203,6 +74384,14 @@ public void setCreation_metadataIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DBNAME: if (value == null) { unsetDbname(); @@ -70232,6 +74421,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DBNAME: return getDbname(); @@ -70252,6 +74444,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DBNAME: return isSetDbname(); case TBL_NAME: @@ -70275,6 +74469,15 @@ public boolean equals(update_creation_metadata_args that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbname = true && this.isSetDbname(); boolean that_present_dbname = true && that.isSetDbname(); if (this_present_dbname || that_present_dbname) { @@ -70309,6 +74512,11 @@ public boolean equals(update_creation_metadata_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbname = true && (isSetDbname()); list.add(present_dbname); if (present_dbname) @@ -70335,6 +74543,16 @@ public int compareTo(update_creation_metadata_args other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname()); if (lastComparison != 0) { return lastComparison; @@ -70385,6 +74603,14 @@ public String toString() { StringBuilder sb = new StringBuilder("update_creation_metadata_args("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbname:"); if (this.dbname == null) { sb.append("null"); @@ -70454,7 +74680,15 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met break; } switch (schemeField.id) { - case 1: // DBNAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DBNAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); @@ -70462,7 +74696,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -70470,7 +74704,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // CREATION_METADATA + case 4: // CREATION_METADATA if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.creation_metadata = new CreationMetadata(); struct.creation_metadata.read(iprot); @@ -70492,6 +74726,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_creation_me struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbname != null) { oprot.writeFieldBegin(DBNAME_FIELD_DESC); oprot.writeString(struct.dbname); @@ -70525,16 +74764,22 @@ public update_creation_metadata_argsTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbname()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTbl_name()) { + if (struct.isSetDbname()) { optionals.set(1); } - if (struct.isSetCreation_metadata()) { + if (struct.isSetTbl_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCreation_metadata()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbname()) { oprot.writeString(struct.dbname); } @@ -70549,16 +74794,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_met @Override public void read(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.creation_metadata = new CreationMetadata(); struct.creation_metadata.read(iprot); struct.setCreation_metadataIsSet(true); @@ -72261,13 +76510,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if 
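The update_creation_metadata_args hunks are the one substantive change in this stretch: catName takes Thrift field ID 1, the existing dbname, tbl_name, and creation_metadata fields shift to IDs 2 through 4, and the TupleScheme bitset grows from readBitSet(3) to readBitSet(4). The resulting slot mapping, sketched with plain java.util.BitSet:

    // Sketch of the optionals bitset layout after this change; slot numbers
    // correspond to the writeBitSet(optionals, 4) / readBitSet(4) calls above.
    import java.util.BitSet;

    final class UpdateCreationMetadataSlots {
      static BitSet optionals(boolean hasCatName, boolean hasDbname,
                              boolean hasTblName, boolean hasCreationMetadata) {
        BitSet optionals = new BitSet(4);
        if (hasCatName) optionals.set(0);          // new in this patch
        if (hasDbname) optionals.set(1);           // previously slot 0
        if (hasTblName) optionals.set(2);          // previously slot 1
        if (hasCreationMetadata) optionals.set(3); // previously slot 2
        return optionals;
      }
    }

Because the field IDs themselves move, a pre-patch peer and a post-patch peer disagree on this method's encoding; that is the cost of inserting catName at position 1 rather than appending it, and presumably why client and server are expected to be regenerated together.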
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); - struct.success = new ArrayList(_list1092.size); - String _elem1093; - for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) + org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); + struct.success = new ArrayList(_list1100.size); + String _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1093 = iprot.readString(); - struct.success.add(_elem1093); + _elem1101 = iprot.readString(); + struct.success.add(_elem1101); } iprot.readListEnd(); } @@ -72320,9 +76569,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1095 : struct.success) + for (String _iter1103 : struct.success) { - oprot.writeString(_iter1095); + oprot.writeString(_iter1103); } oprot.writeListEnd(); } @@ -72377,9 +76626,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1096 : struct.success) + for (String _iter1104 : struct.success) { - oprot.writeString(_iter1096); + oprot.writeString(_iter1104); } } } @@ -72400,13 +76649,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1097.size); - String _elem1098; - for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) + org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1105.size); + String _elem1106; + for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) { - _elem1098 = iprot.readString(); - struct.success.add(_elem1098); + _elem1106 = iprot.readString(); + struct.success.add(_elem1106); } } struct.setSuccessIsSet(true); @@ -78265,14 +82514,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1100.size); - Partition _elem1101; - for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) + org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1108.size); + Partition _elem1109; + for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) { - _elem1101 = new Partition(); - _elem1101.read(iprot); - struct.new_parts.add(_elem1101); + _elem1109 = new Partition(); + _elem1109.read(iprot); + struct.new_parts.add(_elem1109); } iprot.readListEnd(); } @@ -78298,9 +82547,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1103 : struct.new_parts) + for (Partition _iter1111 : struct.new_parts) { - _iter1103.write(oprot); + _iter1111.write(oprot); } oprot.writeListEnd(); } @@ 
-78331,9 +82580,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1104 : struct.new_parts) + for (Partition _iter1112 : struct.new_parts) { - _iter1104.write(oprot); + _iter1112.write(oprot); } } } @@ -78345,14 +82594,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1105.size); - Partition _elem1106; - for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) + org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1113.size); + Partition _elem1114; + for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) { - _elem1106 = new Partition(); - _elem1106.read(iprot); - struct.new_parts.add(_elem1106); + _elem1114 = new Partition(); + _elem1114.read(iprot); + struct.new_parts.add(_elem1114); } } struct.setNew_partsIsSet(true); @@ -79353,14 +83602,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1108.size); - PartitionSpec _elem1109; - for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) + org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1116.size); + PartitionSpec _elem1117; + for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) { - _elem1109 = new PartitionSpec(); - _elem1109.read(iprot); - struct.new_parts.add(_elem1109); + _elem1117 = new PartitionSpec(); + _elem1117.read(iprot); + struct.new_parts.add(_elem1117); } iprot.readListEnd(); } @@ -79386,9 +83635,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1111 : struct.new_parts) + for (PartitionSpec _iter1119 : struct.new_parts) { - _iter1111.write(oprot); + _iter1119.write(oprot); } oprot.writeListEnd(); } @@ -79419,9 +83668,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1112 : struct.new_parts) + for (PartitionSpec _iter1120 : struct.new_parts) { - _iter1112.write(oprot); + _iter1120.write(oprot); } } } @@ -79433,14 +83682,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1113.size); - PartitionSpec _elem1114; - for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) + org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1121.size); + 
PartitionSpec _elem1122; + for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) { - _elem1114 = new PartitionSpec(); - _elem1114.read(iprot); - struct.new_parts.add(_elem1114); + _elem1122 = new PartitionSpec(); + _elem1122.read(iprot); + struct.new_parts.add(_elem1122); } } struct.setNew_partsIsSet(true); @@ -80616,13 +84865,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1116.size); - String _elem1117; - for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) + org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1124.size); + String _elem1125; + for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) { - _elem1117 = iprot.readString(); - struct.part_vals.add(_elem1117); + _elem1125 = iprot.readString(); + struct.part_vals.add(_elem1125); } iprot.readListEnd(); } @@ -80658,9 +84907,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1119 : struct.part_vals) + for (String _iter1127 : struct.part_vals) { - oprot.writeString(_iter1119); + oprot.writeString(_iter1127); } oprot.writeListEnd(); } @@ -80703,9 +84952,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1120 : struct.part_vals) + for (String _iter1128 : struct.part_vals) { - oprot.writeString(_iter1120); + oprot.writeString(_iter1128); } } } @@ -80725,13 +84974,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1121.size); - String _elem1122; - for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) + org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1129.size); + String _elem1130; + for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) { - _elem1122 = iprot.readString(); - struct.part_vals.add(_elem1122); + _elem1130 = iprot.readString(); + struct.part_vals.add(_elem1130); } } struct.setPart_valsIsSet(true); @@ -83040,13 +87289,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1124.size); - String _elem1125; - for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1132.size); + String _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1125 = iprot.readString(); - struct.part_vals.add(_elem1125); + _elem1133 = iprot.readString(); + struct.part_vals.add(_elem1133); } iprot.readListEnd(); } @@ -83091,9 +87340,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1127 : struct.part_vals) + for (String _iter1135 : struct.part_vals) { - oprot.writeString(_iter1127); + oprot.writeString(_iter1135); } oprot.writeListEnd(); } @@ -83144,9 +87393,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1128 : struct.part_vals) + for (String _iter1136 : struct.part_vals) { - oprot.writeString(_iter1128); + oprot.writeString(_iter1136); } } } @@ -83169,13 +87418,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1129.size); - String _elem1130; - for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1137.size); + String _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1130 = iprot.readString(); - struct.part_vals.add(_elem1130); + _elem1138 = iprot.readString(); + struct.part_vals.add(_elem1138); } } struct.setPart_valsIsSet(true); @@ -87045,13 +91294,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1132.size); - String _elem1133; - for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) + org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1140.size); + String _elem1141; + for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) { - _elem1133 = iprot.readString(); - struct.part_vals.add(_elem1133); + _elem1141 = iprot.readString(); + struct.part_vals.add(_elem1141); } iprot.readListEnd(); } @@ -87095,9 +91344,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1135 : struct.part_vals) + for (String _iter1143 : struct.part_vals) { - oprot.writeString(_iter1135); + oprot.writeString(_iter1143); } oprot.writeListEnd(); } @@ -87146,9 +91395,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1136 : struct.part_vals) + for (String _iter1144 : struct.part_vals) { - oprot.writeString(_iter1136); + oprot.writeString(_iter1144); } } } @@ -87171,13 +91420,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1137.size); - String _elem1138; - for (int _i1139 = 0; _i1139 < 
_list1137.size; ++_i1139) + org.apache.thrift.protocol.TList _list1145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1145.size); + String _elem1146; + for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) { - _elem1138 = iprot.readString(); - struct.part_vals.add(_elem1138); + _elem1146 = iprot.readString(); + struct.part_vals.add(_elem1146); } } struct.setPart_valsIsSet(true); @@ -88416,13 +92665,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1140 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1140.size); - String _elem1141; - for (int _i1142 = 0; _i1142 < _list1140.size; ++_i1142) + org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1148.size); + String _elem1149; + for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) { - _elem1141 = iprot.readString(); - struct.part_vals.add(_elem1141); + _elem1149 = iprot.readString(); + struct.part_vals.add(_elem1149); } iprot.readListEnd(); } @@ -88475,9 +92724,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1143 : struct.part_vals) + for (String _iter1151 : struct.part_vals) { - oprot.writeString(_iter1143); + oprot.writeString(_iter1151); } oprot.writeListEnd(); } @@ -88534,9 +92783,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1144 : struct.part_vals) + for (String _iter1152 : struct.part_vals) { - oprot.writeString(_iter1144); + oprot.writeString(_iter1152); } } } @@ -88562,13 +92811,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1145 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1145.size); - String _elem1146; - for (int _i1147 = 0; _i1147 < _list1145.size; ++_i1147) + org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1153.size); + String _elem1154; + for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) { - _elem1146 = iprot.readString(); - struct.part_vals.add(_elem1146); + _elem1154 = iprot.readString(); + struct.part_vals.add(_elem1154); } } struct.setPart_valsIsSet(true); @@ -93170,13 +97419,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1148 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1148.size); - String _elem1149; - for (int _i1150 = 0; _i1150 < _list1148.size; ++_i1150) + org.apache.thrift.protocol.TList _list1156 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1156.size); + String _elem1157; + for (int _i1158 = 0; _i1158 < _list1156.size; ++_i1158) { - _elem1149 = iprot.readString(); - struct.part_vals.add(_elem1149); + 
_elem1157 = iprot.readString(); + struct.part_vals.add(_elem1157); } iprot.readListEnd(); } @@ -93212,9 +97461,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1151 : struct.part_vals) + for (String _iter1159 : struct.part_vals) { - oprot.writeString(_iter1151); + oprot.writeString(_iter1159); } oprot.writeListEnd(); } @@ -93257,9 +97506,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1152 : struct.part_vals) + for (String _iter1160 : struct.part_vals) { - oprot.writeString(_iter1152); + oprot.writeString(_iter1160); } } } @@ -93279,13 +97528,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1153.size); - String _elem1154; - for (int _i1155 = 0; _i1155 < _list1153.size; ++_i1155) + org.apache.thrift.protocol.TList _list1161 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1161.size); + String _elem1162; + for (int _i1163 = 0; _i1163 < _list1161.size; ++_i1163) { - _elem1154 = iprot.readString(); - struct.part_vals.add(_elem1154); + _elem1162 = iprot.readString(); + struct.part_vals.add(_elem1162); } } struct.setPart_valsIsSet(true); @@ -94503,15 +98752,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1156 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1156.size); - String _key1157; - String _val1158; - for (int _i1159 = 0; _i1159 < _map1156.size; ++_i1159) + org.apache.thrift.protocol.TMap _map1164 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1164.size); + String _key1165; + String _val1166; + for (int _i1167 = 0; _i1167 < _map1164.size; ++_i1167) { - _key1157 = iprot.readString(); - _val1158 = iprot.readString(); - struct.partitionSpecs.put(_key1157, _val1158); + _key1165 = iprot.readString(); + _val1166 = iprot.readString(); + struct.partitionSpecs.put(_key1165, _val1166); } iprot.readMapEnd(); } @@ -94569,10 +98818,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1160 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1168 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1160.getKey()); - oprot.writeString(_iter1160.getValue()); + oprot.writeString(_iter1168.getKey()); + oprot.writeString(_iter1168.getValue()); } oprot.writeMapEnd(); } @@ -94635,10 +98884,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1161 : struct.partitionSpecs.entrySet()) + for 
(Map.Entry _iter1169 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1161.getKey()); - oprot.writeString(_iter1161.getValue()); + oprot.writeString(_iter1169.getKey()); + oprot.writeString(_iter1169.getValue()); } } } @@ -94662,15 +98911,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1162 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1162.size); - String _key1163; - String _val1164; - for (int _i1165 = 0; _i1165 < _map1162.size; ++_i1165) + org.apache.thrift.protocol.TMap _map1170 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1170.size); + String _key1171; + String _val1172; + for (int _i1173 = 0; _i1173 < _map1170.size; ++_i1173) { - _key1163 = iprot.readString(); - _val1164 = iprot.readString(); - struct.partitionSpecs.put(_key1163, _val1164); + _key1171 = iprot.readString(); + _val1172 = iprot.readString(); + struct.partitionSpecs.put(_key1171, _val1172); } } struct.setPartitionSpecsIsSet(true); @@ -96116,15 +100365,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1166 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1166.size); - String _key1167; - String _val1168; - for (int _i1169 = 0; _i1169 < _map1166.size; ++_i1169) + org.apache.thrift.protocol.TMap _map1174 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1174.size); + String _key1175; + String _val1176; + for (int _i1177 = 0; _i1177 < _map1174.size; ++_i1177) { - _key1167 = iprot.readString(); - _val1168 = iprot.readString(); - struct.partitionSpecs.put(_key1167, _val1168); + _key1175 = iprot.readString(); + _val1176 = iprot.readString(); + struct.partitionSpecs.put(_key1175, _val1176); } iprot.readMapEnd(); } @@ -96182,10 +100431,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1170 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1178 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1170.getKey()); - oprot.writeString(_iter1170.getValue()); + oprot.writeString(_iter1178.getKey()); + oprot.writeString(_iter1178.getValue()); } oprot.writeMapEnd(); } @@ -96248,10 +100497,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1171 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1179 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1171.getKey()); - oprot.writeString(_iter1171.getValue()); + oprot.writeString(_iter1179.getKey()); + oprot.writeString(_iter1179.getValue()); } } } @@ -96275,15 +100524,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TMap _map1172 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1172.size); - String _key1173; - String _val1174; - for (int _i1175 = 0; _i1175 < _map1172.size; ++_i1175) + org.apache.thrift.protocol.TMap _map1180 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1180.size); + String _key1181; + String _val1182; + for (int _i1183 = 0; _i1183 < _map1180.size; ++_i1183) { - _key1173 = iprot.readString(); - _val1174 = iprot.readString(); - struct.partitionSpecs.put(_key1173, _val1174); + _key1181 = iprot.readString(); + _val1182 = iprot.readString(); + struct.partitionSpecs.put(_key1181, _val1182); } } struct.setPartitionSpecsIsSet(true); @@ -96948,14 +101197,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1176 = iprot.readListBegin(); - struct.success = new ArrayList(_list1176.size); - Partition _elem1177; - for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) + org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); + struct.success = new ArrayList(_list1184.size); + Partition _elem1185; + for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) { - _elem1177 = new Partition(); - _elem1177.read(iprot); - struct.success.add(_elem1177); + _elem1185 = new Partition(); + _elem1185.read(iprot); + struct.success.add(_elem1185); } iprot.readListEnd(); } @@ -97017,9 +101266,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1179 : struct.success) + for (Partition _iter1187 : struct.success) { - _iter1179.write(oprot); + _iter1187.write(oprot); } oprot.writeListEnd(); } @@ -97082,9 +101331,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1180 : struct.success) + for (Partition _iter1188 : struct.success) { - _iter1180.write(oprot); + _iter1188.write(oprot); } } } @@ -97108,14 +101357,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1181.size); - Partition _elem1182; - for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) + org.apache.thrift.protocol.TList _list1189 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1189.size); + Partition _elem1190; + for (int _i1191 = 0; _i1191 < _list1189.size; ++_i1191) { - _elem1182 = new Partition(); - _elem1182.read(iprot); - struct.success.add(_elem1182); + _elem1190 = new Partition(); + _elem1190.read(iprot); + struct.success.add(_elem1190); } } struct.setSuccessIsSet(true); @@ -97814,13 +102063,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 
3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1184.size); - String _elem1185; - for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) + org.apache.thrift.protocol.TList _list1192 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1192.size); + String _elem1193; + for (int _i1194 = 0; _i1194 < _list1192.size; ++_i1194) { - _elem1185 = iprot.readString(); - struct.part_vals.add(_elem1185); + _elem1193 = iprot.readString(); + struct.part_vals.add(_elem1193); } iprot.readListEnd(); } @@ -97840,13 +102089,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1187 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1187.size); - String _elem1188; - for (int _i1189 = 0; _i1189 < _list1187.size; ++_i1189) + org.apache.thrift.protocol.TList _list1195 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1195.size); + String _elem1196; + for (int _i1197 = 0; _i1197 < _list1195.size; ++_i1197) { - _elem1188 = iprot.readString(); - struct.group_names.add(_elem1188); + _elem1196 = iprot.readString(); + struct.group_names.add(_elem1196); } iprot.readListEnd(); } @@ -97882,9 +102131,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1190 : struct.part_vals) + for (String _iter1198 : struct.part_vals) { - oprot.writeString(_iter1190); + oprot.writeString(_iter1198); } oprot.writeListEnd(); } @@ -97899,9 +102148,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1191 : struct.group_names) + for (String _iter1199 : struct.group_names) { - oprot.writeString(_iter1191); + oprot.writeString(_iter1199); } oprot.writeListEnd(); } @@ -97950,9 +102199,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1192 : struct.part_vals) + for (String _iter1200 : struct.part_vals) { - oprot.writeString(_iter1192); + oprot.writeString(_iter1200); } } } @@ -97962,9 +102211,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1193 : struct.group_names) + for (String _iter1201 : struct.group_names) { - oprot.writeString(_iter1193); + oprot.writeString(_iter1201); } } } @@ -97984,13 +102233,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1194 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1194.size); - String _elem1195; - for (int _i1196 = 0; _i1196 < _list1194.size; ++_i1196) + org.apache.thrift.protocol.TList _list1202 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1202.size); + String _elem1203; + for (int _i1204 = 0; _i1204 < _list1202.size; ++_i1204) { - _elem1195 = iprot.readString(); - struct.part_vals.add(_elem1195); + _elem1203 = iprot.readString(); + struct.part_vals.add(_elem1203); } } struct.setPart_valsIsSet(true); @@ -98001,13 +102250,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1197 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1197.size); - String _elem1198; - for (int _i1199 = 0; _i1199 < _list1197.size; ++_i1199) + org.apache.thrift.protocol.TList _list1205 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1205.size); + String _elem1206; + for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) { - _elem1198 = iprot.readString(); - struct.group_names.add(_elem1198); + _elem1206 = iprot.readString(); + struct.group_names.add(_elem1206); } } struct.setGroup_namesIsSet(true); @@ -100776,14 +105025,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1200 = iprot.readListBegin(); - struct.success = new ArrayList(_list1200.size); - Partition _elem1201; - for (int _i1202 = 0; _i1202 < _list1200.size; ++_i1202) + org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin(); + struct.success = new ArrayList(_list1208.size); + Partition _elem1209; + for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210) { - _elem1201 = new Partition(); - _elem1201.read(iprot); - struct.success.add(_elem1201); + _elem1209 = new Partition(); + _elem1209.read(iprot); + struct.success.add(_elem1209); } iprot.readListEnd(); } @@ -100827,9 +105076,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1203 : struct.success) + for (Partition _iter1211 : struct.success) { - _iter1203.write(oprot); + _iter1211.write(oprot); } oprot.writeListEnd(); } @@ -100876,9 +105125,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1204 : struct.success) + for (Partition _iter1212 : struct.success) { - _iter1204.write(oprot); + _iter1212.write(oprot); } } } @@ -100896,14 +105145,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1205 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1205.size); - Partition _elem1206; - for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) + org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1213.size); + Partition _elem1214; + for (int _i1215 = 0; _i1215 < _list1213.size; 
++_i1215) { - _elem1206 = new Partition(); - _elem1206.read(iprot); - struct.success.add(_elem1206); + _elem1214 = new Partition(); + _elem1214.read(iprot); + struct.success.add(_elem1214); } } struct.setSuccessIsSet(true); @@ -101593,13 +105842,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1208.size); - String _elem1209; - for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210) + org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1216.size); + String _elem1217; + for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) { - _elem1209 = iprot.readString(); - struct.group_names.add(_elem1209); + _elem1217 = iprot.readString(); + struct.group_names.add(_elem1217); } iprot.readListEnd(); } @@ -101643,9 +105892,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1211 : struct.group_names) + for (String _iter1219 : struct.group_names) { - oprot.writeString(_iter1211); + oprot.writeString(_iter1219); } oprot.writeListEnd(); } @@ -101700,9 +105949,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1212 : struct.group_names) + for (String _iter1220 : struct.group_names) { - oprot.writeString(_iter1212); + oprot.writeString(_iter1220); } } } @@ -101730,13 +105979,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1213.size); - String _elem1214; - for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) + org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1221.size); + String _elem1222; + for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) { - _elem1214 = iprot.readString(); - struct.group_names.add(_elem1214); + _elem1222 = iprot.readString(); + struct.group_names.add(_elem1222); } } struct.setGroup_namesIsSet(true); @@ -102223,14 +106472,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); - struct.success = new ArrayList(_list1216.size); - Partition _elem1217; - for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) + org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); + struct.success = new ArrayList(_list1224.size); + Partition _elem1225; + for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) { - _elem1217 = new Partition(); - _elem1217.read(iprot); - struct.success.add(_elem1217); + _elem1225 = new Partition(); + _elem1225.read(iprot); + struct.success.add(_elem1225); } iprot.readListEnd(); } @@ -102274,9 +106523,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1219 : struct.success) + for (Partition _iter1227 : struct.success) { - _iter1219.write(oprot); + _iter1227.write(oprot); } oprot.writeListEnd(); } @@ -102323,9 +106572,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1220 : struct.success) + for (Partition _iter1228 : struct.success) { - _iter1220.write(oprot); + _iter1228.write(oprot); } } } @@ -102343,14 +106592,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1221.size); - Partition _elem1222; - for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) + org.apache.thrift.protocol.TList _list1229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1229.size); + Partition _elem1230; + for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) { - _elem1222 = new Partition(); - _elem1222.read(iprot); - struct.success.add(_elem1222); + _elem1230 = new Partition(); + _elem1230.read(iprot); + struct.success.add(_elem1230); } } struct.setSuccessIsSet(true); @@ -103413,14 +107662,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); - struct.success = new ArrayList(_list1224.size); - PartitionSpec _elem1225; - for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) + org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); + struct.success = new ArrayList(_list1232.size); + PartitionSpec _elem1233; + for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) { - _elem1225 = new PartitionSpec(); - _elem1225.read(iprot); - struct.success.add(_elem1225); + _elem1233 = new PartitionSpec(); + _elem1233.read(iprot); + struct.success.add(_elem1233); } iprot.readListEnd(); } @@ -103464,9 +107713,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1227 : struct.success) + for (PartitionSpec _iter1235 : struct.success) { - _iter1227.write(oprot); + _iter1235.write(oprot); } oprot.writeListEnd(); } @@ -103513,9 +107762,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1228 : struct.success) + for (PartitionSpec _iter1236 : struct.success) { - _iter1228.write(oprot); + _iter1236.write(oprot); } } } @@ -103533,14 +107782,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1229 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1229.size); - PartitionSpec _elem1230; - for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) + org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1237.size); + PartitionSpec _elem1238; + for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) { - _elem1230 = new PartitionSpec(); - _elem1230.read(iprot); - struct.success.add(_elem1230); + _elem1238 = new PartitionSpec(); + _elem1238.read(iprot); + struct.success.add(_elem1238); } } struct.setSuccessIsSet(true); @@ -104600,13 +108849,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); - struct.success = new ArrayList(_list1232.size); - String _elem1233; - for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) + org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); + struct.success = new ArrayList(_list1240.size); + String _elem1241; + for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) { - _elem1233 = iprot.readString(); - struct.success.add(_elem1233); + _elem1241 = iprot.readString(); + struct.success.add(_elem1241); } iprot.readListEnd(); } @@ -104650,9 +108899,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1235 : struct.success) + for (String _iter1243 : struct.success) { - oprot.writeString(_iter1235); + oprot.writeString(_iter1243); } oprot.writeListEnd(); } @@ -104699,9 +108948,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1236 : struct.success) + for (String _iter1244 : struct.success) { - oprot.writeString(_iter1236); + oprot.writeString(_iter1244); } } } @@ -104719,13 +108968,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1237.size); - String _elem1238; - for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) + org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1245.size); + String _elem1246; + for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) { - _elem1238 = iprot.readString(); - struct.success.add(_elem1238); + _elem1246 = iprot.readString(); + struct.success.add(_elem1246); } } struct.setSuccessIsSet(true); @@ -106256,13 +110505,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1240.size); - String _elem1241; - for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) + 
org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1248.size); + String _elem1249; + for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) { - _elem1241 = iprot.readString(); - struct.part_vals.add(_elem1241); + _elem1249 = iprot.readString(); + struct.part_vals.add(_elem1249); } iprot.readListEnd(); } @@ -106306,9 +110555,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1243 : struct.part_vals) + for (String _iter1251 : struct.part_vals) { - oprot.writeString(_iter1243); + oprot.writeString(_iter1251); } oprot.writeListEnd(); } @@ -106357,9 +110606,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1244 : struct.part_vals) + for (String _iter1252 : struct.part_vals) { - oprot.writeString(_iter1244); + oprot.writeString(_iter1252); } } } @@ -106382,13 +110631,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1245.size); - String _elem1246; - for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) + org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1253.size); + String _elem1254; + for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) { - _elem1246 = iprot.readString(); - struct.part_vals.add(_elem1246); + _elem1254 = iprot.readString(); + struct.part_vals.add(_elem1254); } } struct.setPart_valsIsSet(true); @@ -106879,14 +111128,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); - struct.success = new ArrayList(_list1248.size); - Partition _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); + struct.success = new ArrayList(_list1256.size); + Partition _elem1257; + for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) { - _elem1249 = new Partition(); - _elem1249.read(iprot); - struct.success.add(_elem1249); + _elem1257 = new Partition(); + _elem1257.read(iprot); + struct.success.add(_elem1257); } iprot.readListEnd(); } @@ -106930,9 +111179,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1251 : struct.success) + for (Partition _iter1259 : struct.success) { - _iter1251.write(oprot); + _iter1259.write(oprot); } oprot.writeListEnd(); } @@ -106979,9 +111228,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1252 : struct.success) + for (Partition _iter1260 : struct.success) { - 
_iter1252.write(oprot); + _iter1260.write(oprot); } } } @@ -106999,14 +111248,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1253.size); - Partition _elem1254; - for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) + org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1261.size); + Partition _elem1262; + for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) { - _elem1254 = new Partition(); - _elem1254.read(iprot); - struct.success.add(_elem1254); + _elem1262 = new Partition(); + _elem1262.read(iprot); + struct.success.add(_elem1262); } } struct.setSuccessIsSet(true); @@ -107778,13 +112027,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1256.size); - String _elem1257; - for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) + org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1264.size); + String _elem1265; + for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) { - _elem1257 = iprot.readString(); - struct.part_vals.add(_elem1257); + _elem1265 = iprot.readString(); + struct.part_vals.add(_elem1265); } iprot.readListEnd(); } @@ -107812,13 +112061,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1259 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1259.size); - String _elem1260; - for (int _i1261 = 0; _i1261 < _list1259.size; ++_i1261) + org.apache.thrift.protocol.TList _list1267 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1267.size); + String _elem1268; + for (int _i1269 = 0; _i1269 < _list1267.size; ++_i1269) { - _elem1260 = iprot.readString(); - struct.group_names.add(_elem1260); + _elem1268 = iprot.readString(); + struct.group_names.add(_elem1268); } iprot.readListEnd(); } @@ -107854,9 +112103,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1262 : struct.part_vals) + for (String _iter1270 : struct.part_vals) { - oprot.writeString(_iter1262); + oprot.writeString(_iter1270); } oprot.writeListEnd(); } @@ -107874,9 +112123,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1263 : struct.group_names) + for (String _iter1271 : struct.group_names) { - oprot.writeString(_iter1263); + oprot.writeString(_iter1271); } oprot.writeListEnd(); } @@ -107928,9 +112177,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1264 : struct.part_vals) + for (String _iter1272 : struct.part_vals) { - oprot.writeString(_iter1264); + oprot.writeString(_iter1272); } } } @@ -107943,9 +112192,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1265 : struct.group_names) + for (String _iter1273 : struct.group_names) { - oprot.writeString(_iter1265); + oprot.writeString(_iter1273); } } } @@ -107965,13 +112214,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1266 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1266.size); - String _elem1267; - for (int _i1268 = 0; _i1268 < _list1266.size; ++_i1268) + org.apache.thrift.protocol.TList _list1274 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1274.size); + String _elem1275; + for (int _i1276 = 0; _i1276 < _list1274.size; ++_i1276) { - _elem1267 = iprot.readString(); - struct.part_vals.add(_elem1267); + _elem1275 = iprot.readString(); + struct.part_vals.add(_elem1275); } } struct.setPart_valsIsSet(true); @@ -107986,13 +112235,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1269.size); - String _elem1270; - for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) + org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1277.size); + String _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1270 = iprot.readString(); - struct.group_names.add(_elem1270); + _elem1278 = iprot.readString(); + struct.group_names.add(_elem1278); } } struct.setGroup_namesIsSet(true); @@ -108479,14 +112728,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); - struct.success = new ArrayList(_list1272.size); - Partition _elem1273; - for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.success = new ArrayList(_list1280.size); + Partition _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1273 = new Partition(); - _elem1273.read(iprot); - struct.success.add(_elem1273); + _elem1281 = new Partition(); + _elem1281.read(iprot); + struct.success.add(_elem1281); } iprot.readListEnd(); } @@ -108530,9 +112779,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1275 : struct.success) + for (Partition _iter1283 : struct.success) { - _iter1275.write(oprot); + _iter1283.write(oprot); } 
oprot.writeListEnd(); } @@ -108579,9 +112828,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1276 : struct.success) + for (Partition _iter1284 : struct.success) { - _iter1276.write(oprot); + _iter1284.write(oprot); } } } @@ -108599,14 +112848,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1277.size); - Partition _elem1278; - for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1285.size); + Partition _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) { - _elem1278 = new Partition(); - _elem1278.read(iprot); - struct.success.add(_elem1278); + _elem1286 = new Partition(); + _elem1286.read(iprot); + struct.success.add(_elem1286); } } struct.setSuccessIsSet(true); @@ -109199,13 +113448,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1280.size); - String _elem1281; - for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1288.size); + String _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1281 = iprot.readString(); - struct.part_vals.add(_elem1281); + _elem1289 = iprot.readString(); + struct.part_vals.add(_elem1289); } iprot.readListEnd(); } @@ -109249,9 +113498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1283 : struct.part_vals) + for (String _iter1291 : struct.part_vals) { - oprot.writeString(_iter1283); + oprot.writeString(_iter1291); } oprot.writeListEnd(); } @@ -109300,9 +113549,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1284 : struct.part_vals) + for (String _iter1292 : struct.part_vals) { - oprot.writeString(_iter1284); + oprot.writeString(_iter1292); } } } @@ -109325,13 +113574,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1285.size); - String _elem1286; - for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1293.size); + String _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1286 = 
iprot.readString(); - struct.part_vals.add(_elem1286); + _elem1294 = iprot.readString(); + struct.part_vals.add(_elem1294); } } struct.setPart_valsIsSet(true); @@ -109819,13 +114068,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); - struct.success = new ArrayList(_list1288.size); - String _elem1289; - for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) + org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); + struct.success = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1289 = iprot.readString(); - struct.success.add(_elem1289); + _elem1297 = iprot.readString(); + struct.success.add(_elem1297); } iprot.readListEnd(); } @@ -109869,9 +114118,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1291 : struct.success) + for (String _iter1299 : struct.success) { - oprot.writeString(_iter1291); + oprot.writeString(_iter1299); } oprot.writeListEnd(); } @@ -109918,9 +114167,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1292 : struct.success) + for (String _iter1300 : struct.success) { - oprot.writeString(_iter1292); + oprot.writeString(_iter1300); } } } @@ -109938,13 +114187,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1293.size); - String _elem1294; - for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) + org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1301.size); + String _elem1302; + for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) { - _elem1294 = iprot.readString(); - struct.success.add(_elem1294); + _elem1302 = iprot.readString(); + struct.success.add(_elem1302); } } struct.setSuccessIsSet(true); @@ -111111,14 +115360,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); - struct.success = new ArrayList(_list1296.size); - Partition _elem1297; - for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) + org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); + struct.success = new ArrayList(_list1304.size); + Partition _elem1305; + for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) { - _elem1297 = new Partition(); - _elem1297.read(iprot); - struct.success.add(_elem1297); + _elem1305 = new Partition(); + _elem1305.read(iprot); + struct.success.add(_elem1305); } iprot.readListEnd(); } @@ -111162,9 +115411,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1299 : struct.success) + for (Partition _iter1307 : struct.success) { - _iter1299.write(oprot); + _iter1307.write(oprot); } oprot.writeListEnd(); } @@ -111211,9 +115460,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1300 : struct.success) + for (Partition _iter1308 : struct.success) { - _iter1300.write(oprot); + _iter1308.write(oprot); } } } @@ -111231,14 +115480,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1301.size); - Partition _elem1302; - for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) + org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1309.size); + Partition _elem1310; + for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) { - _elem1302 = new Partition(); - _elem1302.read(iprot); - struct.success.add(_elem1302); + _elem1310 = new Partition(); + _elem1310.read(iprot); + struct.success.add(_elem1310); } } struct.setSuccessIsSet(true); @@ -112405,14 +116654,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); - struct.success = new ArrayList(_list1304.size); - PartitionSpec _elem1305; - for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) + org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); + struct.success = new ArrayList(_list1312.size); + PartitionSpec _elem1313; + for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) { - _elem1305 = new PartitionSpec(); - _elem1305.read(iprot); - struct.success.add(_elem1305); + _elem1313 = new PartitionSpec(); + _elem1313.read(iprot); + struct.success.add(_elem1313); } iprot.readListEnd(); } @@ -112456,9 +116705,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1307 : struct.success) + for (PartitionSpec _iter1315 : struct.success) { - _iter1307.write(oprot); + _iter1315.write(oprot); } oprot.writeListEnd(); } @@ -112505,9 +116754,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1308 : struct.success) + for (PartitionSpec _iter1316 : struct.success) { - _iter1308.write(oprot); + _iter1316.write(oprot); } } } @@ -112525,14 +116774,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1309.size); - PartitionSpec 
_elem1310; - for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) + org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1317.size); + PartitionSpec _elem1318; + for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) { - _elem1310 = new PartitionSpec(); - _elem1310.read(iprot); - struct.success.add(_elem1310); + _elem1318 = new PartitionSpec(); + _elem1318.read(iprot); + struct.success.add(_elem1318); } } struct.setSuccessIsSet(true); @@ -115116,13 +119365,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); - struct.names = new ArrayList(_list1312.size); - String _elem1313; - for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.names = new ArrayList(_list1320.size); + String _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1313 = iprot.readString(); - struct.names.add(_elem1313); + _elem1321 = iprot.readString(); + struct.names.add(_elem1321); } iprot.readListEnd(); } @@ -115158,9 +119407,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1315 : struct.names) + for (String _iter1323 : struct.names) { - oprot.writeString(_iter1315); + oprot.writeString(_iter1323); } oprot.writeListEnd(); } @@ -115203,9 +119452,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1316 : struct.names) + for (String _iter1324 : struct.names) { - oprot.writeString(_iter1316); + oprot.writeString(_iter1324); } } } @@ -115225,13 +119474,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1317.size); - String _elem1318; - for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) + org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1325.size); + String _elem1326; + for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) { - _elem1318 = iprot.readString(); - struct.names.add(_elem1318); + _elem1326 = iprot.readString(); + struct.names.add(_elem1326); } } struct.setNamesIsSet(true); @@ -115718,14 +119967,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); - struct.success = new ArrayList(_list1320.size); - Partition _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); + struct.success = new ArrayList(_list1328.size); + Partition _elem1329; + for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) { - _elem1321 = new Partition(); - 
_elem1321.read(iprot); - struct.success.add(_elem1321); + _elem1329 = new Partition(); + _elem1329.read(iprot); + struct.success.add(_elem1329); } iprot.readListEnd(); } @@ -115769,9 +120018,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1323 : struct.success) + for (Partition _iter1331 : struct.success) { - _iter1323.write(oprot); + _iter1331.write(oprot); } oprot.writeListEnd(); } @@ -115818,9 +120067,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1324 : struct.success) + for (Partition _iter1332 : struct.success) { - _iter1324.write(oprot); + _iter1332.write(oprot); } } } @@ -115838,14 +120087,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1325.size); - Partition _elem1326; - for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) + org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1333.size); + Partition _elem1334; + for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) { - _elem1326 = new Partition(); - _elem1326.read(iprot); - struct.success.add(_elem1326); + _elem1334 = new Partition(); + _elem1334.read(iprot); + struct.success.add(_elem1334); } } struct.setSuccessIsSet(true); @@ -117395,14 +121644,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1328.size); - Partition _elem1329; - for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) + org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1336.size); + Partition _elem1337; + for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) { - _elem1329 = new Partition(); - _elem1329.read(iprot); - struct.new_parts.add(_elem1329); + _elem1337 = new Partition(); + _elem1337.read(iprot); + struct.new_parts.add(_elem1337); } iprot.readListEnd(); } @@ -117438,9 +121687,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1331 : struct.new_parts) + for (Partition _iter1339 : struct.new_parts) { - _iter1331.write(oprot); + _iter1339.write(oprot); } oprot.writeListEnd(); } @@ -117483,9 +121732,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1332 : struct.new_parts) + for (Partition _iter1340 : struct.new_parts) { - _iter1332.write(oprot); + _iter1340.write(oprot); } } } @@ -117505,14 +121754,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1333.size); - Partition _elem1334; - for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) + org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1341.size); + Partition _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1334 = new Partition(); - _elem1334.read(iprot); - struct.new_parts.add(_elem1334); + _elem1342 = new Partition(); + _elem1342.read(iprot); + struct.new_parts.add(_elem1342); } } struct.setNew_partsIsSet(true); @@ -118565,14 +122814,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1336.size); - Partition _elem1337; - for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1344.size); + Partition _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1337 = new Partition(); - _elem1337.read(iprot); - struct.new_parts.add(_elem1337); + _elem1345 = new Partition(); + _elem1345.read(iprot); + struct.new_parts.add(_elem1345); } iprot.readListEnd(); } @@ -118617,9 +122866,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1339 : struct.new_parts) + for (Partition _iter1347 : struct.new_parts) { - _iter1339.write(oprot); + _iter1347.write(oprot); } oprot.writeListEnd(); } @@ -118670,9 +122919,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1340 : struct.new_parts) + for (Partition _iter1348 : struct.new_parts) { - _iter1340.write(oprot); + _iter1348.write(oprot); } } } @@ -118695,14 +122944,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1341.size); - Partition _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1349.size); + Partition _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1342 = new Partition(); - _elem1342.read(iprot); - struct.new_parts.add(_elem1342); + _elem1350 = new Partition(); + _elem1350.read(iprot); + struct.new_parts.add(_elem1350); } } struct.setNew_partsIsSet(true); @@ -120903,13 +125152,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1344.size); - String _elem1345; - for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) + org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1352.size); + String _elem1353; + for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) { - _elem1345 = iprot.readString(); - struct.part_vals.add(_elem1345); + _elem1353 = iprot.readString(); + struct.part_vals.add(_elem1353); } iprot.readListEnd(); } @@ -120954,9 +125203,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1347 : struct.part_vals) + for (String _iter1355 : struct.part_vals) { - oprot.writeString(_iter1347); + oprot.writeString(_iter1355); } oprot.writeListEnd(); } @@ -121007,9 +125256,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1348 : struct.part_vals) + for (String _iter1356 : struct.part_vals) { - oprot.writeString(_iter1348); + oprot.writeString(_iter1356); } } } @@ -121032,13 +125281,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1349.size); - String _elem1350; - for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) + org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1357.size); + String _elem1358; + for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) { - _elem1350 = iprot.readString(); - struct.part_vals.add(_elem1350); + _elem1358 = iprot.readString(); + struct.part_vals.add(_elem1358); } } struct.setPart_valsIsSet(true); @@ -121912,13 +126161,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1352 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1352.size); - String _elem1353; - for (int _i1354 = 0; _i1354 < _list1352.size; ++_i1354) + org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1360.size); + String _elem1361; + for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) { - _elem1353 = iprot.readString(); - struct.part_vals.add(_elem1353); + _elem1361 = iprot.readString(); + struct.part_vals.add(_elem1361); } iprot.readListEnd(); } @@ -121952,9 +126201,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1355 : struct.part_vals) + for (String _iter1363 : struct.part_vals) { - oprot.writeString(_iter1355); + oprot.writeString(_iter1363); } oprot.writeListEnd(); } @@ -121991,9 +126240,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1356 : struct.part_vals) + for (String _iter1364 : struct.part_vals) { - oprot.writeString(_iter1356); + oprot.writeString(_iter1364); } } } @@ -122008,13 +126257,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1357.size); - String _elem1358; - for (int _i1359 = 0; _i1359 < _list1357.size; ++_i1359) + org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1365.size); + String _elem1366; + for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) { - _elem1358 = iprot.readString(); - struct.part_vals.add(_elem1358); + _elem1366 = iprot.readString(); + struct.part_vals.add(_elem1366); } } struct.setPart_valsIsSet(true); @@ -124169,13 +128418,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1360 = iprot.readListBegin(); - struct.success = new ArrayList(_list1360.size); - String _elem1361; - for (int _i1362 = 0; _i1362 < _list1360.size; ++_i1362) + org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin(); + struct.success = new ArrayList(_list1368.size); + String _elem1369; + for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370) { - _elem1361 = iprot.readString(); - struct.success.add(_elem1361); + _elem1369 = iprot.readString(); + struct.success.add(_elem1369); } iprot.readListEnd(); } @@ -124210,9 +128459,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1363 : struct.success) + for (String _iter1371 : struct.success) { - oprot.writeString(_iter1363); + oprot.writeString(_iter1371); } oprot.writeListEnd(); } @@ -124251,9 +128500,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1364 : struct.success) + for (String _iter1372 : struct.success) { - oprot.writeString(_iter1364); + oprot.writeString(_iter1372); } } } @@ -124268,13 +128517,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1365.size); - String _elem1366; - for (int _i1367 = 0; _i1367 < _list1365.size; ++_i1367) + org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1373.size); + String _elem1374; + for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375) { - _elem1366 = iprot.readString(); - struct.success.add(_elem1366); + _elem1374 = iprot.readString(); + 
struct.success.add(_elem1374); } } struct.setSuccessIsSet(true); @@ -125037,15 +129286,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1368 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1368.size); - String _key1369; - String _val1370; - for (int _i1371 = 0; _i1371 < _map1368.size; ++_i1371) + org.apache.thrift.protocol.TMap _map1376 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1376.size); + String _key1377; + String _val1378; + for (int _i1379 = 0; _i1379 < _map1376.size; ++_i1379) { - _key1369 = iprot.readString(); - _val1370 = iprot.readString(); - struct.success.put(_key1369, _val1370); + _key1377 = iprot.readString(); + _val1378 = iprot.readString(); + struct.success.put(_key1377, _val1378); } iprot.readMapEnd(); } @@ -125080,10 +129329,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1372 : struct.success.entrySet()) + for (Map.Entry _iter1380 : struct.success.entrySet()) { - oprot.writeString(_iter1372.getKey()); - oprot.writeString(_iter1372.getValue()); + oprot.writeString(_iter1380.getKey()); + oprot.writeString(_iter1380.getValue()); } oprot.writeMapEnd(); } @@ -125122,10 +129371,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1373 : struct.success.entrySet()) + for (Map.Entry _iter1381 : struct.success.entrySet()) { - oprot.writeString(_iter1373.getKey()); - oprot.writeString(_iter1373.getValue()); + oprot.writeString(_iter1381.getKey()); + oprot.writeString(_iter1381.getValue()); } } } @@ -125140,15 +129389,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1374 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1374.size); - String _key1375; - String _val1376; - for (int _i1377 = 0; _i1377 < _map1374.size; ++_i1377) + org.apache.thrift.protocol.TMap _map1382 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1382.size); + String _key1383; + String _val1384; + for (int _i1385 = 0; _i1385 < _map1382.size; ++_i1385) { - _key1375 = iprot.readString(); - _val1376 = iprot.readString(); - struct.success.put(_key1375, _val1376); + _key1383 = iprot.readString(); + _val1384 = iprot.readString(); + struct.success.put(_key1383, _val1384); } } struct.setSuccessIsSet(true); @@ -125743,15 +129992,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1378 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1378.size); - String _key1379; - String _val1380; - for (int _i1381 = 0; _i1381 < _map1378.size; ++_i1381) + org.apache.thrift.protocol.TMap _map1386 = 
iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1386.size); + String _key1387; + String _val1388; + for (int _i1389 = 0; _i1389 < _map1386.size; ++_i1389) { - _key1379 = iprot.readString(); - _val1380 = iprot.readString(); - struct.part_vals.put(_key1379, _val1380); + _key1387 = iprot.readString(); + _val1388 = iprot.readString(); + struct.part_vals.put(_key1387, _val1388); } iprot.readMapEnd(); } @@ -125795,10 +130044,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1382 : struct.part_vals.entrySet()) + for (Map.Entry _iter1390 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1382.getKey()); - oprot.writeString(_iter1382.getValue()); + oprot.writeString(_iter1390.getKey()); + oprot.writeString(_iter1390.getValue()); } oprot.writeMapEnd(); } @@ -125849,10 +130098,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1383 : struct.part_vals.entrySet()) + for (Map.Entry _iter1391 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1383.getKey()); - oprot.writeString(_iter1383.getValue()); + oprot.writeString(_iter1391.getKey()); + oprot.writeString(_iter1391.getValue()); } } } @@ -125875,15 +130124,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1384 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1384.size); - String _key1385; - String _val1386; - for (int _i1387 = 0; _i1387 < _map1384.size; ++_i1387) + org.apache.thrift.protocol.TMap _map1392 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1392.size); + String _key1393; + String _val1394; + for (int _i1395 = 0; _i1395 < _map1392.size; ++_i1395) { - _key1385 = iprot.readString(); - _val1386 = iprot.readString(); - struct.part_vals.put(_key1385, _val1386); + _key1393 = iprot.readString(); + _val1394 = iprot.readString(); + struct.part_vals.put(_key1393, _val1394); } } struct.setPart_valsIsSet(true); @@ -127367,15 +131616,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1388 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1388.size); - String _key1389; - String _val1390; - for (int _i1391 = 0; _i1391 < _map1388.size; ++_i1391) + org.apache.thrift.protocol.TMap _map1396 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1396.size); + String _key1397; + String _val1398; + for (int _i1399 = 0; _i1399 < _map1396.size; ++_i1399) { - _key1389 = iprot.readString(); - _val1390 = iprot.readString(); - struct.part_vals.put(_key1389, _val1390); + _key1397 = iprot.readString(); + _val1398 = iprot.readString(); + struct.part_vals.put(_key1397, _val1398); } iprot.readMapEnd(); } @@ -127419,10 +131668,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF 
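
[Editor's note] The same renumbering covers the map-valued fields (part_vals in markPartitionForEvent and isPartitionMarkedForEvent above). One detail of the template worth calling out: the generated code sizes the HashMap at 2*_map.size, presumably so that, given HashMap's default 0.75 load factor, no rehash occurs while the serialized entries are inserted. A sketch of that template, again with illustrative names standing in for the numbered temporaries:

    org.apache.thrift.protocol.TMap _map = iprot.readMapBegin();
    // twice the entry count keeps the table under the default 0.75 load factor: no resize mid-read
    struct.part_vals = new HashMap<String, String>(2 * _map.size);
    for (int _i = 0; _i < _map.size; ++_i) {
      String _key = iprot.readString();
      String _val = iprot.readString();
      struct.part_vals.put(_key, _val);
    }
    iprot.readMapEnd();
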
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1392 : struct.part_vals.entrySet()) + for (Map.Entry _iter1400 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1392.getKey()); - oprot.writeString(_iter1392.getValue()); + oprot.writeString(_iter1400.getKey()); + oprot.writeString(_iter1400.getValue()); } oprot.writeMapEnd(); } @@ -127473,10 +131722,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1393 : struct.part_vals.entrySet()) + for (Map.Entry _iter1401 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1393.getKey()); - oprot.writeString(_iter1393.getValue()); + oprot.writeString(_iter1401.getKey()); + oprot.writeString(_iter1401.getValue()); } } } @@ -127499,15 +131748,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1394 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1394.size); - String _key1395; - String _val1396; - for (int _i1397 = 0; _i1397 < _map1394.size; ++_i1397) + org.apache.thrift.protocol.TMap _map1402 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1402.size); + String _key1403; + String _val1404; + for (int _i1405 = 0; _i1405 < _map1402.size; ++_i1405) { - _key1395 = iprot.readString(); - _val1396 = iprot.readString(); - struct.part_vals.put(_key1395, _val1396); + _key1403 = iprot.readString(); + _val1404 = iprot.readString(); + struct.part_vals.put(_key1403, _val1404); } } struct.setPart_valsIsSet(true); @@ -149863,13 +154112,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.success = new ArrayList(_list1398.size); - String _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); + struct.success = new ArrayList(_list1406.size); + String _elem1407; + for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) { - _elem1399 = iprot.readString(); - struct.success.add(_elem1399); + _elem1407 = iprot.readString(); + struct.success.add(_elem1407); } iprot.readListEnd(); } @@ -149904,9 +154153,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1401 : struct.success) + for (String _iter1409 : struct.success) { - oprot.writeString(_iter1401); + oprot.writeString(_iter1409); } oprot.writeListEnd(); } @@ -149945,9 +154194,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1402 : struct.success) + for (String _iter1410 : struct.success) { - oprot.writeString(_iter1402); + 
oprot.writeString(_iter1410); } } } @@ -149962,13 +154211,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1403.size); - String _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1411.size); + String _elem1412; + for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) { - _elem1404 = iprot.readString(); - struct.success.add(_elem1404); + _elem1412 = iprot.readString(); + struct.success.add(_elem1412); } } struct.setSuccessIsSet(true); @@ -154023,13 +158272,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.success = new ArrayList(_list1406.size); - String _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); + struct.success = new ArrayList(_list1414.size); + String _elem1415; + for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) { - _elem1407 = iprot.readString(); - struct.success.add(_elem1407); + _elem1415 = iprot.readString(); + struct.success.add(_elem1415); } iprot.readListEnd(); } @@ -154064,9 +158313,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1409 : struct.success) + for (String _iter1417 : struct.success) { - oprot.writeString(_iter1409); + oprot.writeString(_iter1417); } oprot.writeListEnd(); } @@ -154105,9 +158354,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1410 : struct.success) + for (String _iter1418 : struct.success) { - oprot.writeString(_iter1410); + oprot.writeString(_iter1418); } } } @@ -154122,13 +158371,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1411.size); - String _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1419.size); + String _elem1420; + for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) { - _elem1412 = iprot.readString(); - struct.success.add(_elem1412); + _elem1420 = iprot.readString(); + struct.success.add(_elem1420); } } struct.setSuccessIsSet(true); @@ -157419,14 +161668,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.success = new ArrayList(_list1414.size); - Role _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); + struct.success = new ArrayList(_list1422.size); + Role _elem1423; + for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) { - _elem1415 = new Role(); - _elem1415.read(iprot); - struct.success.add(_elem1415); + _elem1423 = new Role(); + _elem1423.read(iprot); + struct.success.add(_elem1423); } iprot.readListEnd(); } @@ -157461,9 +161710,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1417 : struct.success) + for (Role _iter1425 : struct.success) { - _iter1417.write(oprot); + _iter1425.write(oprot); } oprot.writeListEnd(); } @@ -157502,9 +161751,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1418 : struct.success) + for (Role _iter1426 : struct.success) { - _iter1418.write(oprot); + _iter1426.write(oprot); } } } @@ -157519,14 +161768,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1419.size); - Role _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1427.size); + Role _elem1428; + for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) { - _elem1420 = new Role(); - _elem1420.read(iprot); - struct.success.add(_elem1420); + _elem1428 = new Role(); + _elem1428.read(iprot); + struct.success.add(_elem1428); } } struct.setSuccessIsSet(true); @@ -160531,13 +164780,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1422.size); - String _elem1423; - for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) + org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1430.size); + String _elem1431; + for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) { - _elem1423 = iprot.readString(); - struct.group_names.add(_elem1423); + _elem1431 = iprot.readString(); + struct.group_names.add(_elem1431); } iprot.readListEnd(); } @@ -160573,9 +164822,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1425 : struct.group_names) + for (String _iter1433 : struct.group_names) { - oprot.writeString(_iter1425); + oprot.writeString(_iter1433); } oprot.writeListEnd(); } @@ -160618,9 +164867,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1426 : struct.group_names) + for (String _iter1434 : struct.group_names) { - oprot.writeString(_iter1426); + oprot.writeString(_iter1434); } } } @@ -160641,13 +164890,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1427.size); - String _elem1428; - for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) + org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1435.size); + String _elem1436; + for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) { - _elem1428 = iprot.readString(); - struct.group_names.add(_elem1428); + _elem1436 = iprot.readString(); + struct.group_names.add(_elem1436); } } struct.setGroup_namesIsSet(true); @@ -162105,14 +166354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); - struct.success = new ArrayList(_list1430.size); - HiveObjectPrivilege _elem1431; - for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) + org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); + struct.success = new ArrayList(_list1438.size); + HiveObjectPrivilege _elem1439; + for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) { - _elem1431 = new HiveObjectPrivilege(); - _elem1431.read(iprot); - struct.success.add(_elem1431); + _elem1439 = new HiveObjectPrivilege(); + _elem1439.read(iprot); + struct.success.add(_elem1439); } iprot.readListEnd(); } @@ -162147,9 +166396,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1433 : struct.success) + for (HiveObjectPrivilege _iter1441 : struct.success) { - _iter1433.write(oprot); + _iter1441.write(oprot); } oprot.writeListEnd(); } @@ -162188,9 +166437,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1434 : struct.success) + for (HiveObjectPrivilege _iter1442 : struct.success) { - _iter1434.write(oprot); + _iter1442.write(oprot); } } } @@ -162205,14 +166454,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1435.size); - HiveObjectPrivilege _elem1436; - for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) + org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1443.size); + HiveObjectPrivilege _elem1444; + for (int _i1445 = 0; _i1445 < 
_list1443.size; ++_i1445) { - _elem1436 = new HiveObjectPrivilege(); - _elem1436.read(iprot); - struct.success.add(_elem1436); + _elem1444 = new HiveObjectPrivilege(); + _elem1444.read(iprot); + struct.success.add(_elem1444); } } struct.setSuccessIsSet(true); @@ -165114,13 +169363,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1438.size); - String _elem1439; - for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) + org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1446.size); + String _elem1447; + for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) { - _elem1439 = iprot.readString(); - struct.group_names.add(_elem1439); + _elem1447 = iprot.readString(); + struct.group_names.add(_elem1447); } iprot.readListEnd(); } @@ -165151,9 +169400,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1441 : struct.group_names) + for (String _iter1449 : struct.group_names) { - oprot.writeString(_iter1441); + oprot.writeString(_iter1449); } oprot.writeListEnd(); } @@ -165190,9 +169439,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1442 : struct.group_names) + for (String _iter1450 : struct.group_names) { - oprot.writeString(_iter1442); + oprot.writeString(_iter1450); } } } @@ -165208,13 +169457,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1443.size); - String _elem1444; - for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) + org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1451.size); + String _elem1452; + for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) { - _elem1444 = iprot.readString(); - struct.group_names.add(_elem1444); + _elem1452 = iprot.readString(); + struct.group_names.add(_elem1452); } } struct.setGroup_namesIsSet(true); @@ -165617,13 +169866,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); - struct.success = new ArrayList(_list1446.size); - String _elem1447; - for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) + org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); + struct.success = new ArrayList(_list1454.size); + String _elem1455; + for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) { - _elem1447 = iprot.readString(); - struct.success.add(_elem1447); + _elem1455 = iprot.readString(); + struct.success.add(_elem1455); } iprot.readListEnd(); } @@ -165658,9 +169907,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
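
[Editor's note] Each RPC carries the scheme pair seen throughout this file: a StandardScheme that writes self-describing field headers, and a TupleScheme that instead leads with a bit set marking which optional fields follow (the readBitSet(1|2|3) calls above), then writes bare sizes and payloads. A hedged sketch of that convention for a result struct with a single optional success list; writeBitSet is the standard counterpart to the readBitSet calls shown in the diff, and the field shape here is illustrative:

    // write side
    TTupleProtocol oprot = (TTupleProtocol) prot;
    BitSet optionals = new BitSet();
    if (struct.isSetSuccess()) optionals.set(0);
    oprot.writeBitSet(optionals, 1);           // one bit per optional field
    if (struct.isSetSuccess()) {
      oprot.writeI32(struct.success.size());   // bare element count, no TList header
      for (String _iter : struct.success) oprot.writeString(_iter);
    }
    // read side mirrors the write order
    TTupleProtocol iprot = (TTupleProtocol) prot;
    BitSet incoming = iprot.readBitSet(1);
    if (incoming.get(0)) {
      int size = iprot.readI32();
      struct.success = new ArrayList<String>(size);
      for (int _i = 0; _i < size; ++_i) struct.success.add(iprot.readString());
      struct.setSuccessIsSet(true);
    }
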
oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1449 : struct.success) + for (String _iter1457 : struct.success) { - oprot.writeString(_iter1449); + oprot.writeString(_iter1457); } oprot.writeListEnd(); } @@ -165699,9 +169948,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1450 : struct.success) + for (String _iter1458 : struct.success) { - oprot.writeString(_iter1450); + oprot.writeString(_iter1458); } } } @@ -165716,13 +169965,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1451.size); - String _elem1452; - for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) + org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1459.size); + String _elem1460; + for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) { - _elem1452 = iprot.readString(); - struct.success.add(_elem1452); + _elem1460 = iprot.readString(); + struct.success.add(_elem1460); } } struct.setSuccessIsSet(true); @@ -171013,13 +175262,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); - struct.success = new ArrayList(_list1454.size); - String _elem1455; - for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) + org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); + struct.success = new ArrayList(_list1462.size); + String _elem1463; + for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) { - _elem1455 = iprot.readString(); - struct.success.add(_elem1455); + _elem1463 = iprot.readString(); + struct.success.add(_elem1463); } iprot.readListEnd(); } @@ -171045,9 +175294,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1457 : struct.success) + for (String _iter1465 : struct.success) { - oprot.writeString(_iter1457); + oprot.writeString(_iter1465); } oprot.writeListEnd(); } @@ -171078,9 +175327,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1458 : struct.success) + for (String _iter1466 : struct.success) { - oprot.writeString(_iter1458); + oprot.writeString(_iter1466); } } } @@ -171092,13 +175341,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1459.size); - String _elem1460; - for (int _i1461 = 0; _i1461 < 
_list1459.size; ++_i1461) + org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1467.size); + String _elem1468; + for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) { - _elem1460 = iprot.readString(); - struct.success.add(_elem1460); + _elem1468 = iprot.readString(); + struct.success.add(_elem1468); } } struct.setSuccessIsSet(true); @@ -174128,13 +178377,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1462 = iprot.readListBegin(); - struct.success = new ArrayList(_list1462.size); - String _elem1463; - for (int _i1464 = 0; _i1464 < _list1462.size; ++_i1464) + org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); + struct.success = new ArrayList(_list1470.size); + String _elem1471; + for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) { - _elem1463 = iprot.readString(); - struct.success.add(_elem1463); + _elem1471 = iprot.readString(); + struct.success.add(_elem1471); } iprot.readListEnd(); } @@ -174160,9 +178409,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1465 : struct.success) + for (String _iter1473 : struct.success) { - oprot.writeString(_iter1465); + oprot.writeString(_iter1473); } oprot.writeListEnd(); } @@ -174193,9 +178442,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1466 : struct.success) + for (String _iter1474 : struct.success) { - oprot.writeString(_iter1466); + oprot.writeString(_iter1474); } } } @@ -174207,13 +178456,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1467 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1467.size); - String _elem1468; - for (int _i1469 = 0; _i1469 < _list1467.size; ++_i1469) + org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1475.size); + String _elem1476; + for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) { - _elem1468 = iprot.readString(); - struct.success.add(_elem1468); + _elem1476 = iprot.readString(); + struct.success.add(_elem1476); } } struct.setSuccessIsSet(true); @@ -221787,14 +226036,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1470 = iprot.readListBegin(); - struct.success = new ArrayList(_list1470.size); - SchemaVersion _elem1471; - for (int _i1472 = 0; _i1472 < _list1470.size; ++_i1472) + org.apache.thrift.protocol.TList _list1478 = iprot.readListBegin(); + struct.success = new ArrayList(_list1478.size); + SchemaVersion _elem1479; + for (int _i1480 = 0; _i1480 < _list1478.size; ++_i1480) { - _elem1471 = new SchemaVersion(); - 
_elem1471.read(iprot); - struct.success.add(_elem1471); + _elem1479 = new SchemaVersion(); + _elem1479.read(iprot); + struct.success.add(_elem1479); } iprot.readListEnd(); } @@ -221838,9 +226087,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1473 : struct.success) + for (SchemaVersion _iter1481 : struct.success) { - _iter1473.write(oprot); + _iter1481.write(oprot); } oprot.writeListEnd(); } @@ -221887,9 +226136,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1474 : struct.success) + for (SchemaVersion _iter1482 : struct.success) { - _iter1474.write(oprot); + _iter1482.write(oprot); } } } @@ -221907,14 +226156,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1475 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1475.size); - SchemaVersion _elem1476; - for (int _i1477 = 0; _i1477 < _list1475.size; ++_i1477) + org.apache.thrift.protocol.TList _list1483 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1483.size); + SchemaVersion _elem1484; + for (int _i1485 = 0; _i1485 < _list1483.size; ++_i1485) { - _elem1476 = new SchemaVersion(); - _elem1476.read(iprot); - struct.success.add(_elem1476); + _elem1484 = new SchemaVersion(); + _elem1484.read(iprot); + struct.success.add(_elem1484); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java index 9ad8728351..b5d482931f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class UniqueConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UniqueConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + 
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new UniqueConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public UniqueConstraintsRequest() { } public UniqueConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public UniqueConstraintsRequest( * Performs a deep copy on other. 
*/ public UniqueConstraintsRequest(UniqueConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public UniqueConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(UniqueConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(UniqueConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(UniqueConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("UniqueConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check 
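
[Editor's note] Beyond (de)serialization, a single new field drags the usual generated boilerplate with it, all visible above for catName: getter/setter/unset/isSet, the setFieldValue/getFieldValue/isSet dispatch, and clauses in equals, hashCode, compareTo, toString, and validate. The equals clause follows a fixed null-safe shape, restated here with its logic spelled out:

    boolean this_present = this.isSetCatName();
    boolean that_present = that.isSetCatName();
    if (this_present || that_present) {
      if (!(this_present && that_present))
        return false;                     // set on one side only: unequal
      if (!this.catName.equals(that.catName))
        return false;                     // both set: compare values, no NPE possible
    }
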
for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, UniqueConstraintsR struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public UniqueConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe @Override public void read(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java index 4d45bee71e..80dea833bb 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe case 1: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list328 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list328.size); - SQLUniqueConstraint _elem329; - for (int _i330 = 0; _i330 < _list328.size; ++_i330) + org.apache.thrift.protocol.TList _list336 = iprot.readListBegin(); + struct.uniqueConstraints = new 
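
[Editor's note] Unlike the renumbering-only hunks, the UniqueConstraintsRequest change just above is substantive: catName is added as required field 1, pushing db_name and tbl_name to ids 2 and 3; validate() now rejects a request without a catalog; and the tuple scheme writes all three strings unconditionally. Old and new peers therefore disagree on both the field ids and the tuple layout of this struct, so client and server generated code for this call need to move together. A hypothetical caller-side sketch of the new shape (names other than DEFAULT_CATALOG_NAME, which this commit imports from Warehouse, are illustrative):

    UniqueConstraintsRequest req = new UniqueConstraintsRequest(
        Warehouse.DEFAULT_CATALOG_NAME,  // catalog now comes first (field 1)
        "my_db",                         // illustrative database name
        "my_table");                     // illustrative table name
    req.validate();                      // throws TProtocolException if any of the three is unset
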
ArrayList(_list336.size); + SQLUniqueConstraint _elem337; + for (int _i338 = 0; _i338 < _list336.size; ++_i338) { - _elem329 = new SQLUniqueConstraint(); - _elem329.read(iprot); - struct.uniqueConstraints.add(_elem329); + _elem337 = new SQLUniqueConstraint(); + _elem337.read(iprot); + struct.uniqueConstraints.add(_elem337); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, UniqueConstraintsR oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter331 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter339 : struct.uniqueConstraints) { - _iter331.write(oprot); + _iter339.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter332 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter340 : struct.uniqueConstraints) { - _iter332.write(oprot); + _iter340.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe public void read(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list333.size); - SQLUniqueConstraint _elem334; - for (int _i335 = 0; _i335 < _list333.size; ++_i335) + org.apache.thrift.protocol.TList _list341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list341.size); + SQLUniqueConstraint _elem342; + for (int _i343 = 0; _i343 < _list341.size; ++_i343) { - _elem334 = new SQLUniqueConstraint(); - _elem334.read(iprot); - struct.uniqueConstraints.add(_elem334); + _elem342 = new SQLUniqueConstraint(); + _elem342.read(iprot); + struct.uniqueConstraints.add(_elem342); } } struct.setUniqueConstraintsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java index a7be2ecb67..35605679c2 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java @@ -755,14 +755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 2: // POOLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list816 = iprot.readListBegin(); - struct.pools = new ArrayList(_list816.size); - WMPool _elem817; - for (int _i818 = 0; _i818 < _list816.size; ++_i818) + org.apache.thrift.protocol.TList _list824 = iprot.readListBegin(); + struct.pools = new ArrayList(_list824.size); + WMPool _elem825; + for (int _i826 = 0; _i826 < _list824.size; ++_i826) { - _elem817 = new WMPool(); - _elem817.read(iprot); - struct.pools.add(_elem817); + _elem825 = new WMPool(); + _elem825.read(iprot); + 
struct.pools.add(_elem825); } iprot.readListEnd(); } @@ -774,14 +774,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 3: // MAPPINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list819 = iprot.readListBegin(); - struct.mappings = new ArrayList(_list819.size); - WMMapping _elem820; - for (int _i821 = 0; _i821 < _list819.size; ++_i821) + org.apache.thrift.protocol.TList _list827 = iprot.readListBegin(); + struct.mappings = new ArrayList(_list827.size); + WMMapping _elem828; + for (int _i829 = 0; _i829 < _list827.size; ++_i829) { - _elem820 = new WMMapping(); - _elem820.read(iprot); - struct.mappings.add(_elem820); + _elem828 = new WMMapping(); + _elem828.read(iprot); + struct.mappings.add(_elem828); } iprot.readListEnd(); } @@ -793,14 +793,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 4: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list822 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list822.size); - WMTrigger _elem823; - for (int _i824 = 0; _i824 < _list822.size; ++_i824) + org.apache.thrift.protocol.TList _list830 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list830.size); + WMTrigger _elem831; + for (int _i832 = 0; _i832 < _list830.size; ++_i832) { - _elem823 = new WMTrigger(); - _elem823.read(iprot); - struct.triggers.add(_elem823); + _elem831 = new WMTrigger(); + _elem831.read(iprot); + struct.triggers.add(_elem831); } iprot.readListEnd(); } @@ -812,14 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMFullResourcePlan case 5: // POOL_TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list825 = iprot.readListBegin(); - struct.poolTriggers = new ArrayList(_list825.size); - WMPoolTrigger _elem826; - for (int _i827 = 0; _i827 < _list825.size; ++_i827) + org.apache.thrift.protocol.TList _list833 = iprot.readListBegin(); + struct.poolTriggers = new ArrayList(_list833.size); + WMPoolTrigger _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem826 = new WMPoolTrigger(); - _elem826.read(iprot); - struct.poolTriggers.add(_elem826); + _elem834 = new WMPoolTrigger(); + _elem834.read(iprot); + struct.poolTriggers.add(_elem834); } iprot.readListEnd(); } @@ -850,9 +850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size())); - for (WMPool _iter828 : struct.pools) + for (WMPool _iter836 : struct.pools) { - _iter828.write(oprot); + _iter836.write(oprot); } oprot.writeListEnd(); } @@ -863,9 +863,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(MAPPINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size())); - for (WMMapping _iter829 : struct.mappings) + for (WMMapping _iter837 : struct.mappings) { - _iter829.write(oprot); + _iter837.write(oprot); } oprot.writeListEnd(); } @@ -877,9 +877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.triggers.size())); - for (WMTrigger _iter830 : struct.triggers) + for (WMTrigger _iter838 : struct.triggers) { - _iter830.write(oprot); + _iter838.write(oprot); } oprot.writeListEnd(); } @@ -891,9 +891,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMFullResourcePlan oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size())); - for (WMPoolTrigger _iter831 : struct.poolTriggers) + for (WMPoolTrigger _iter839 : struct.poolTriggers) { - _iter831.write(oprot); + _iter839.write(oprot); } oprot.writeListEnd(); } @@ -920,9 +920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan struct.plan.write(oprot); { oprot.writeI32(struct.pools.size()); - for (WMPool _iter832 : struct.pools) + for (WMPool _iter840 : struct.pools) { - _iter832.write(oprot); + _iter840.write(oprot); } } BitSet optionals = new BitSet(); @@ -939,27 +939,27 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan if (struct.isSetMappings()) { { oprot.writeI32(struct.mappings.size()); - for (WMMapping _iter833 : struct.mappings) + for (WMMapping _iter841 : struct.mappings) { - _iter833.write(oprot); + _iter841.write(oprot); } } } if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter834 : struct.triggers) + for (WMTrigger _iter842 : struct.triggers) { - _iter834.write(oprot); + _iter842.write(oprot); } } } if (struct.isSetPoolTriggers()) { { oprot.writeI32(struct.poolTriggers.size()); - for (WMPoolTrigger _iter835 : struct.poolTriggers) + for (WMPoolTrigger _iter843 : struct.poolTriggers) { - _iter835.write(oprot); + _iter843.write(oprot); } } } @@ -972,56 +972,56 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMFullResourcePlan s struct.plan.read(iprot); struct.setPlanIsSet(true); { - org.apache.thrift.protocol.TList _list836 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.pools = new ArrayList(_list836.size); - WMPool _elem837; - for (int _i838 = 0; _i838 < _list836.size; ++_i838) + org.apache.thrift.protocol.TList _list844 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.pools = new ArrayList(_list844.size); + WMPool _elem845; + for (int _i846 = 0; _i846 < _list844.size; ++_i846) { - _elem837 = new WMPool(); - _elem837.read(iprot); - struct.pools.add(_elem837); + _elem845 = new WMPool(); + _elem845.read(iprot); + struct.pools.add(_elem845); } } struct.setPoolsIsSet(true); BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list839 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.mappings = new ArrayList(_list839.size); - WMMapping _elem840; - for (int _i841 = 0; _i841 < _list839.size; ++_i841) + org.apache.thrift.protocol.TList _list847 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mappings = new ArrayList(_list847.size); + WMMapping _elem848; + for (int _i849 = 0; _i849 < _list847.size; ++_i849) { - _elem840 = new WMMapping(); - _elem840.read(iprot); - struct.mappings.add(_elem840); + _elem848 = new WMMapping(); + _elem848.read(iprot); + struct.mappings.add(_elem848); } } struct.setMappingsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list842 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new ArrayList(_list842.size); - WMTrigger _elem843; - for (int _i844 = 0; _i844 < _list842.size; ++_i844) + org.apache.thrift.protocol.TList _list850 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list850.size); + WMTrigger _elem851; + for (int _i852 = 0; _i852 < _list850.size; ++_i852) { - _elem843 = new WMTrigger(); - _elem843.read(iprot); - struct.triggers.add(_elem843); + _elem851 = new WMTrigger(); + _elem851.read(iprot); + struct.triggers.add(_elem851); } } struct.setTriggersIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.poolTriggers = new ArrayList(_list845.size); - WMPoolTrigger _elem846; - for (int _i847 = 0; _i847 < _list845.size; ++_i847) + org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.poolTriggers = new ArrayList(_list853.size); + WMPoolTrigger _elem854; + for (int _i855 = 0; _i855 < _list853.size; ++_i855) { - _elem846 = new WMPoolTrigger(); - _elem846.read(iprot); - struct.poolTriggers.add(_elem846); + _elem854 = new WMPoolTrigger(); + _elem854.read(iprot); + struct.poolTriggers.add(_elem854); } } struct.setPoolTriggersIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java index d931b47790..ffe8b68c9f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetAllResourcePla case 1: // RESOURCE_PLANS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.resourcePlans = new ArrayList(_list848.size); - WMResourcePlan _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); + struct.resourcePlans = new ArrayList(_list856.size); + WMResourcePlan _elem857; + for (int _i858 = 0; _i858 < _list856.size; ++_i858) { - _elem849 = new WMResourcePlan(); - _elem849.read(iprot); - struct.resourcePlans.add(_elem849); + _elem857 = new WMResourcePlan(); + _elem857.read(iprot); + struct.resourcePlans.add(_elem857); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetAllResourcePl oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size())); - for (WMResourcePlan _iter851 : struct.resourcePlans) + for (WMResourcePlan _iter859 : struct.resourcePlans) { - _iter851.write(oprot); + _iter859.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePla if (struct.isSetResourcePlans()) { { oprot.writeI32(struct.resourcePlans.size()); - for (WMResourcePlan _iter852 
: struct.resourcePlans) + for (WMResourcePlan _iter860 : struct.resourcePlans) { - _iter852.write(oprot); + _iter860.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetAllResourcePlan BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourcePlans = new ArrayList(_list853.size); - WMResourcePlan _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourcePlans = new ArrayList(_list861.size); + WMResourcePlan _elem862; + for (int _i863 = 0; _i863 < _list861.size; ++_i863) { - _elem854 = new WMResourcePlan(); - _elem854.read(iprot); - struct.resourcePlans.add(_elem854); + _elem862 = new WMResourcePlan(); + _elem862.read(iprot); + struct.resourcePlans.add(_elem862); } } struct.setResourcePlansIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java index a674db2a80..9dfebf09bf 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMGetTriggersForRes case 1: // TRIGGERS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.triggers = new ArrayList(_list872.size); - WMTrigger _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.triggers = new ArrayList(_list880.size); + WMTrigger _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem873 = new WMTrigger(); - _elem873.read(iprot); - struct.triggers.add(_elem873); + _elem881 = new WMTrigger(); + _elem881.read(iprot); + struct.triggers.add(_elem881); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMGetTriggersForRe oprot.writeFieldBegin(TRIGGERS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size())); - for (WMTrigger _iter875 : struct.triggers) + for (WMTrigger _iter883 : struct.triggers) { - _iter875.write(oprot); + _iter883.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForRes if (struct.isSetTriggers()) { { oprot.writeI32(struct.triggers.size()); - for (WMTrigger _iter876 : struct.triggers) + for (WMTrigger _iter884 : struct.triggers) { - _iter876.write(oprot); + _iter884.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMGetTriggersForReso BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.triggers = new 
ArrayList(_list877.size); - WMTrigger _elem878; - for (int _i879 = 0; _i879 < _list877.size; ++_i879) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.triggers = new ArrayList(_list885.size); + WMTrigger _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem878 = new WMTrigger(); - _elem878.read(iprot); - struct.triggers.add(_elem878); + _elem886 = new WMTrigger(); + _elem886.read(iprot); + struct.triggers.add(_elem886); } } struct.setTriggersIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java index db1195843d..8f3b06541f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java @@ -441,13 +441,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 1: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.errors = new ArrayList(_list856.size); - String _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.errors = new ArrayList(_list864.size); + String _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem857 = iprot.readString(); - struct.errors.add(_elem857); + _elem865 = iprot.readString(); + struct.errors.add(_elem865); } iprot.readListEnd(); } @@ -459,13 +459,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, WMValidateResourceP case 2: // WARNINGS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list859 = iprot.readListBegin(); - struct.warnings = new ArrayList(_list859.size); - String _elem860; - for (int _i861 = 0; _i861 < _list859.size; ++_i861) + org.apache.thrift.protocol.TList _list867 = iprot.readListBegin(); + struct.warnings = new ArrayList(_list867.size); + String _elem868; + for (int _i869 = 0; _i869 < _list867.size; ++_i869) { - _elem860 = iprot.readString(); - struct.warnings.add(_elem860); + _elem868 = iprot.readString(); + struct.warnings.add(_elem868); } iprot.readListEnd(); } @@ -492,9 +492,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size())); - for (String _iter862 : struct.errors) + for (String _iter870 : struct.errors) { - oprot.writeString(_iter862); + oprot.writeString(_iter870); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, WMValidateResource oprot.writeFieldBegin(WARNINGS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size())); - for (String _iter863 : struct.warnings) + for (String _iter871 : struct.warnings) { - oprot.writeString(_iter863); + oprot.writeString(_iter871); } oprot.writeListEnd(); } @@ -543,18 +543,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
WMValidateResourceP if (struct.isSetErrors()) { { oprot.writeI32(struct.errors.size()); - for (String _iter864 : struct.errors) + for (String _iter872 : struct.errors) { - oprot.writeString(_iter864); + oprot.writeString(_iter872); } } } if (struct.isSetWarnings()) { { oprot.writeI32(struct.warnings.size()); - for (String _iter865 : struct.warnings) + for (String _iter873 : struct.warnings) { - oprot.writeString(_iter865); + oprot.writeString(_iter873); } } } @@ -566,26 +566,26 @@ public void read(org.apache.thrift.protocol.TProtocol prot, WMValidateResourcePl BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list866 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.errors = new ArrayList(_list866.size); - String _elem867; - for (int _i868 = 0; _i868 < _list866.size; ++_i868) + org.apache.thrift.protocol.TList _list874 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.errors = new ArrayList(_list874.size); + String _elem875; + for (int _i876 = 0; _i876 < _list874.size; ++_i876) { - _elem867 = iprot.readString(); - struct.errors.add(_elem867); + _elem875 = iprot.readString(); + struct.errors.add(_elem875); } } struct.setErrorsIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.warnings = new ArrayList(_list869.size); - String _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.warnings = new ArrayList(_list877.size); + String _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem870 = iprot.readString(); - struct.warnings.add(_elem870); + _elem878 = iprot.readString(); + struct.warnings.add(_elem878); } } struct.setWarningsIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index d00d11be3e..a15a387495 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -33,6 +33,32 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function setMetaConf($key, $value); /** + * @param \metastore\CreateCatalogRequest $catalog + * @throws \metastore\AlreadyExistsException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_catalog(\metastore\CreateCatalogRequest $catalog); + /** + * @param \metastore\GetCatalogRequest $catName + * @return \metastore\GetCatalogResponse + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function get_catalog(\metastore\GetCatalogRequest $catName); + /** + * @return \metastore\GetCatalogsResponse + * @throws \metastore\MetaException + */ + public function get_catalogs(); + /** + * @param \metastore\DropCatalogRequest $catName + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function drop_catalog(\metastore\DropCatalogRequest $catName); + /** * @param \metastore\Database $database * @throws \metastore\AlreadyExistsException * @throws 
\metastore\InvalidObjectException @@ -310,6 +336,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function get_materialization_invalidation_info($dbname, array $tbl_names); /** + * @param string $catName * @param string $dbname * @param string $tbl_name * @param \metastore\CreationMetadata $creation_metadata @@ -317,7 +344,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\InvalidOperationException * @throws \metastore\UnknownDBException */ - public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata); + public function update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata); /** * @param string $dbname * @param string $filter @@ -1600,6 +1627,230 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function create_catalog(\metastore\CreateCatalogRequest $catalog) + { + $this->send_create_catalog($catalog); + $this->recv_create_catalog(); + } + + public function send_create_catalog(\metastore\CreateCatalogRequest $catalog) + { + $args = new \metastore\ThriftHiveMetastore_create_catalog_args(); + $args->catalog = $catalog; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_create_catalog() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_create_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + + public function get_catalog(\metastore\GetCatalogRequest $catName) + { + $this->send_get_catalog($catName); + return $this->recv_get_catalog(); + } + + public function send_get_catalog(\metastore\GetCatalogRequest $catName) + { + $args = new \metastore\ThriftHiveMetastore_get_catalog_args(); + $args->catName = $catName; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_catalog() + { + 
$bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_catalog failed: unknown result"); + } + + public function get_catalogs() + { + $this->send_get_catalogs(); + return $this->recv_get_catalogs(); + } + + public function send_get_catalogs() + { + $args = new \metastore\ThriftHiveMetastore_get_catalogs_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_catalogs', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_catalogs', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_catalogs() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalogs_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_catalogs_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new \Exception("get_catalogs failed: unknown result"); + } + + public function drop_catalog(\metastore\DropCatalogRequest $catName) + { + $this->send_drop_catalog($catName); + $this->recv_drop_catalog(); + } + + public function send_drop_catalog(\metastore\DropCatalogRequest $catName) + { + $args = new \metastore\ThriftHiveMetastore_drop_catalog_args(); + $args->catName = $catName; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('drop_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_catalog() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && 
function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_drop_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + public function create_database(\metastore\Database $database) { $this->send_create_database($database); @@ -3703,15 +3954,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_materialization_invalidation_info failed: unknown result"); } - public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + public function update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) { - $this->send_update_creation_metadata($dbname, $tbl_name, $creation_metadata); + $this->send_update_creation_metadata($catName, $dbname, $tbl_name, $creation_metadata); $this->recv_update_creation_metadata(); } - public function send_update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + public function send_update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) { $args = new \metastore\ThriftHiveMetastore_update_creation_metadata_args(); + $args->catName = $catName; $args->dbname = $dbname; $args->tbl_name = $tbl_name; $args->creation_metadata = $creation_metadata; @@ -12989,33 +13241,33 @@ class ThriftHiveMetastore_setMetaConf_result { } -class ThriftHiveMetastore_create_database_args { +class ThriftHiveMetastore_create_catalog_args { static $_TSPEC; /** - * @var \metastore\Database + * @var \metastore\CreateCatalogRequest */ - public $database = null; + public $catalog = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'database', + 'var' => 'catalog', 'type' => TType::STRUCT, - 'class' => '\metastore\Database', + 'class' => '\metastore\CreateCatalogRequest', ), ); } if (is_array($vals)) { - if (isset($vals['database'])) { - $this->database = $vals['database']; + if (isset($vals['catalog'])) { + $this->catalog = $vals['catalog']; } } } public function getName() { - return 'ThriftHiveMetastore_create_database_args'; + return 'ThriftHiveMetastore_create_catalog_args'; } public function read($input) @@ -13035,8 +13287,8 @@ class ThriftHiveMetastore_create_database_args { { case 1: if ($ftype == TType::STRUCT) { - $this->database = new \metastore\Database(); - $xfer += $this->database->read($input); + $this->catalog = new \metastore\CreateCatalogRequest(); + $xfer += $this->catalog->read($input); } else { $xfer += $input->skip($ftype); } @@ -13053,13 +13305,13 @@ class ThriftHiveMetastore_create_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); - if ($this->database !== null) { - if (!is_object($this->database)) { + $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_create_catalog_args'); + if ($this->catalog !== null) { + if (!is_object($this->catalog)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); - $xfer += $this->database->write($output); + $xfer += $output->writeFieldBegin('catalog', TType::STRUCT, 1); + $xfer += $this->catalog->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -13069,7 +13321,7 @@ class ThriftHiveMetastore_create_database_args { } -class ThriftHiveMetastore_create_database_result { +class ThriftHiveMetastore_create_catalog_result { static $_TSPEC; /** @@ -13119,7 +13371,7 @@ class ThriftHiveMetastore_create_database_result { } public function getName() { - return 'ThriftHiveMetastore_create_database_result'; + return 'ThriftHiveMetastore_create_catalog_result'; } public function read($input) @@ -13173,7 +13425,7 @@ class ThriftHiveMetastore_create_database_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_catalog_result'); if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); @@ -13196,32 +13448,33 @@ class ThriftHiveMetastore_create_database_result { } -class ThriftHiveMetastore_get_database_args { +class ThriftHiveMetastore_get_catalog_args { static $_TSPEC; /** - * @var string + * @var \metastore\GetCatalogRequest */ - public $name = null; + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'name', - 'type' => TType::STRING, + 'var' => 'catName', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetCatalogRequest', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; } } } public function getName() { - return 'ThriftHiveMetastore_get_database_args'; + return 'ThriftHiveMetastore_get_catalog_args'; } public function read($input) @@ -13240,8 +13493,9 @@ class ThriftHiveMetastore_get_database_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); + if ($ftype == TType::STRUCT) { + $this->catName = new \metastore\GetCatalogRequest(); + $xfer += $this->catName->read($input); } else { $xfer += $input->skip($ftype); } @@ -13258,10 +13512,13 @@ class ThriftHiveMetastore_get_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - $xfer += $output->writeString($this->name); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalog_args'); + if ($this->catName !== null) { + if (!is_object($this->catName)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catName', TType::STRUCT, 1); + $xfer += $this->catName->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -13271,11 +13528,11 @@ class ThriftHiveMetastore_get_database_args { } -class ThriftHiveMetastore_get_database_result { +class ThriftHiveMetastore_get_catalog_result { static $_TSPEC; /** - * @var 
\metastore\Database + * @var \metastore\GetCatalogResponse */ public $success = null; /** @@ -13293,7 +13550,7 @@ class ThriftHiveMetastore_get_database_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\Database', + 'class' => '\metastore\GetCatalogResponse', ), 1 => array( 'var' => 'o1', @@ -13321,7 +13578,7 @@ class ThriftHiveMetastore_get_database_result { } public function getName() { - return 'ThriftHiveMetastore_get_database_result'; + return 'ThriftHiveMetastore_get_catalog_result'; } public function read($input) @@ -13341,7 +13598,7 @@ class ThriftHiveMetastore_get_database_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\Database(); + $this->success = new \metastore\GetCatalogResponse(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -13375,7 +13632,7 @@ class ThriftHiveMetastore_get_database_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalog_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -13401,54 +13658,95 @@ class ThriftHiveMetastore_get_database_result { } -class ThriftHiveMetastore_drop_database_args { +class ThriftHiveMetastore_get_catalogs_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_catalogs_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalogs_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_catalogs_result { static $_TSPEC; /** - * @var string - */ - public $name = null; - /** - * @var bool + * @var \metastore\GetCatalogsResponse */ - public $deleteData = null; + public $success = null; /** - * @var bool + * @var \metastore\MetaException */ - public $cascade = null; + public $o1 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'deleteData', - 'type' => TType::BOOL, + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetCatalogsResponse', ), - 3 => array( - 'var' => 'cascade', - 'type' => TType::BOOL, + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; - } - if (isset($vals['deleteData'])) { - $this->deleteData = $vals['deleteData']; + if (isset($vals['success'])) { + $this->success = $vals['success']; } - if (isset($vals['cascade'])) { - $this->cascade = $vals['cascade']; + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; } } } public 
function getName() { - return 'ThriftHiveMetastore_drop_database_args'; + return 'ThriftHiveMetastore_get_catalogs_result'; } public function read($input) @@ -13466,23 +13764,18 @@ class ThriftHiveMetastore_drop_database_args { } switch ($fid) { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->deleteData); + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetCatalogsResponse(); + $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); } break; - case 3: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->cascade); + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); } @@ -13499,20 +13792,98 @@ class ThriftHiveMetastore_drop_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - $xfer += $output->writeString($this->name); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalogs_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->deleteData !== null) { - $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2); - $xfer += $output->writeBool($this->deleteData); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->cascade !== null) { - $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 3); - $xfer += $output->writeBool($this->cascade); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_catalog_args { + static $_TSPEC; + + /** + * @var \metastore\DropCatalogRequest + */ + public $catName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'catName', + 'type' => TType::STRUCT, + 'class' => '\metastore\DropCatalogRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_catalog_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->catName = new \metastore\DropCatalogRequest(); + $xfer += $this->catName->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_catalog_args'); + if ($this->catName !== 
null) { + if (!is_object($this->catName)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catName', TType::STRUCT, 1); + $xfer += $this->catName->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -13522,7 +13893,667 @@ class ThriftHiveMetastore_drop_database_args { } -class ThriftHiveMetastore_drop_database_result { +class ThriftHiveMetastore_drop_catalog_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_catalog_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_catalog_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_database_args { + static $_TSPEC; + + /** + * @var \metastore\Database + */ + public $database = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'database', + 'type' => TType::STRUCT, + 'class' => '\metastore\Database', + ), + 
); + } + if (is_array($vals)) { + if (isset($vals['database'])) { + $this->database = $vals['database']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->database = new \metastore\Database(); + $xfer += $this->database->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); + if ($this->database !== null) { + if (!is_object($this->database)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); + $xfer += $this->database->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_database_result { + static $_TSPEC; + + /** + * @var \metastore\AlreadyExistsException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlreadyExistsException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_database_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\AlreadyExistsException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result'); + if ($this->o1 !== 
null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_database_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_database_result { + static $_TSPEC; + + /** + * @var \metastore\Database + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\Database', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_database_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\Database(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype 
== TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_database_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var bool + */ + public $deleteData = null; + /** + * @var bool + */ + public $cascade = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), + 3 => array( + 'var' => 'cascade', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } + if (isset($vals['cascade'])) { + $this->cascade = $vals['cascade']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->cascade); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2); + $xfer += 
$output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + if ($this->cascade !== null) { + $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 3); + $xfer += $output->writeBool($this->cascade); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_database_result { static $_TSPEC; /** @@ -13786,14 +14817,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size792 = 0; - $_etype795 = 0; - $xfer += $input->readListBegin($_etype795, $_size792); - for ($_i796 = 0; $_i796 < $_size792; ++$_i796) + $_size799 = 0; + $_etype802 = 0; + $xfer += $input->readListBegin($_etype802, $_size799); + for ($_i803 = 0; $_i803 < $_size799; ++$_i803) { - $elem797 = null; - $xfer += $input->readString($elem797); - $this->success []= $elem797; + $elem804 = null; + $xfer += $input->readString($elem804); + $this->success []= $elem804; } $xfer += $input->readListEnd(); } else { @@ -13829,9 +14860,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter798) + foreach ($this->success as $iter805) { - $xfer += $output->writeString($iter798); + $xfer += $output->writeString($iter805); } } $output->writeListEnd(); @@ -13962,14 +14993,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size799 = 0; - $_etype802 = 0; - $xfer += $input->readListBegin($_etype802, $_size799); - for ($_i803 = 0; $_i803 < $_size799; ++$_i803) + $_size806 = 0; + $_etype809 = 0; + $xfer += $input->readListBegin($_etype809, $_size806); + for ($_i810 = 0; $_i810 < $_size806; ++$_i810) { - $elem804 = null; - $xfer += $input->readString($elem804); - $this->success []= $elem804; + $elem811 = null; + $xfer += $input->readString($elem811); + $this->success []= $elem811; } $xfer += $input->readListEnd(); } else { @@ -14005,9 +15036,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter805) + foreach ($this->success as $iter812) { - $xfer += $output->writeString($iter805); + $xfer += $output->writeString($iter812); } } $output->writeListEnd(); @@ -15008,18 +16039,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size806 = 0; - $_ktype807 = 0; - $_vtype808 = 0; - $xfer += $input->readMapBegin($_ktype807, $_vtype808, $_size806); - for ($_i810 = 0; $_i810 < $_size806; ++$_i810) + $_size813 = 0; + $_ktype814 = 0; + $_vtype815 = 0; + $xfer += $input->readMapBegin($_ktype814, $_vtype815, $_size813); + for ($_i817 = 0; $_i817 < $_size813; ++$_i817) { - $key811 = ''; - $val812 = new \metastore\Type(); - $xfer += $input->readString($key811); - $val812 = new \metastore\Type(); - $xfer += $val812->read($input); - $this->success[$key811] = $val812; + $key818 = ''; + $val819 = new \metastore\Type(); + $xfer += $input->readString($key818); + $val819 = new \metastore\Type(); + $xfer += $val819->read($input); + $this->success[$key818] = $val819; } $xfer += $input->readMapEnd(); } else { @@ -15055,10 +16086,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter813 => $viter814) + foreach 
($this->success as $kiter820 => $viter821) { - $xfer += $output->writeString($kiter813); - $xfer += $viter814->write($output); + $xfer += $output->writeString($kiter820); + $xfer += $viter821->write($output); } } $output->writeMapEnd(); @@ -15262,15 +16293,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size815 = 0; - $_etype818 = 0; - $xfer += $input->readListBegin($_etype818, $_size815); - for ($_i819 = 0; $_i819 < $_size815; ++$_i819) + $_size822 = 0; + $_etype825 = 0; + $xfer += $input->readListBegin($_etype825, $_size822); + for ($_i826 = 0; $_i826 < $_size822; ++$_i826) { - $elem820 = null; - $elem820 = new \metastore\FieldSchema(); - $xfer += $elem820->read($input); - $this->success []= $elem820; + $elem827 = null; + $elem827 = new \metastore\FieldSchema(); + $xfer += $elem827->read($input); + $this->success []= $elem827; } $xfer += $input->readListEnd(); } else { @@ -15322,9 +16353,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter821) + foreach ($this->success as $iter828) { - $xfer += $iter821->write($output); + $xfer += $iter828->write($output); } } $output->writeListEnd(); @@ -15566,15 +16597,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size822 = 0; - $_etype825 = 0; - $xfer += $input->readListBegin($_etype825, $_size822); - for ($_i826 = 0; $_i826 < $_size822; ++$_i826) + $_size829 = 0; + $_etype832 = 0; + $xfer += $input->readListBegin($_etype832, $_size829); + for ($_i833 = 0; $_i833 < $_size829; ++$_i833) { - $elem827 = null; - $elem827 = new \metastore\FieldSchema(); - $xfer += $elem827->read($input); - $this->success []= $elem827; + $elem834 = null; + $elem834 = new \metastore\FieldSchema(); + $xfer += $elem834->read($input); + $this->success []= $elem834; } $xfer += $input->readListEnd(); } else { @@ -15626,9 +16657,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter828) + foreach ($this->success as $iter835) { - $xfer += $iter828->write($output); + $xfer += $iter835->write($output); } } $output->writeListEnd(); @@ -15842,15 +16873,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size829 = 0; - $_etype832 = 0; - $xfer += $input->readListBegin($_etype832, $_size829); - for ($_i833 = 0; $_i833 < $_size829; ++$_i833) + $_size836 = 0; + $_etype839 = 0; + $xfer += $input->readListBegin($_etype839, $_size836); + for ($_i840 = 0; $_i840 < $_size836; ++$_i840) { - $elem834 = null; - $elem834 = new \metastore\FieldSchema(); - $xfer += $elem834->read($input); - $this->success []= $elem834; + $elem841 = null; + $elem841 = new \metastore\FieldSchema(); + $xfer += $elem841->read($input); + $this->success []= $elem841; } $xfer += $input->readListEnd(); } else { @@ -15902,9 +16933,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter835) + foreach ($this->success as $iter842) { - $xfer += $iter835->write($output); + $xfer += $iter842->write($output); } } $output->writeListEnd(); @@ -16146,15 +17177,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = 
array(); - $_size836 = 0; - $_etype839 = 0; - $xfer += $input->readListBegin($_etype839, $_size836); - for ($_i840 = 0; $_i840 < $_size836; ++$_i840) + $_size843 = 0; + $_etype846 = 0; + $xfer += $input->readListBegin($_etype846, $_size843); + for ($_i847 = 0; $_i847 < $_size843; ++$_i847) { - $elem841 = null; - $elem841 = new \metastore\FieldSchema(); - $xfer += $elem841->read($input); - $this->success []= $elem841; + $elem848 = null; + $elem848 = new \metastore\FieldSchema(); + $xfer += $elem848->read($input); + $this->success []= $elem848; } $xfer += $input->readListEnd(); } else { @@ -16206,9 +17237,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter842) + foreach ($this->success as $iter849) { - $xfer += $iter842->write($output); + $xfer += $iter849->write($output); } } $output->writeListEnd(); @@ -16880,15 +17911,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size843 = 0; - $_etype846 = 0; - $xfer += $input->readListBegin($_etype846, $_size843); - for ($_i847 = 0; $_i847 < $_size843; ++$_i847) + $_size850 = 0; + $_etype853 = 0; + $xfer += $input->readListBegin($_etype853, $_size850); + for ($_i854 = 0; $_i854 < $_size850; ++$_i854) { - $elem848 = null; - $elem848 = new \metastore\SQLPrimaryKey(); - $xfer += $elem848->read($input); - $this->primaryKeys []= $elem848; + $elem855 = null; + $elem855 = new \metastore\SQLPrimaryKey(); + $xfer += $elem855->read($input); + $this->primaryKeys []= $elem855; } $xfer += $input->readListEnd(); } else { @@ -16898,15 +17929,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size849 = 0; - $_etype852 = 0; - $xfer += $input->readListBegin($_etype852, $_size849); - for ($_i853 = 0; $_i853 < $_size849; ++$_i853) + $_size856 = 0; + $_etype859 = 0; + $xfer += $input->readListBegin($_etype859, $_size856); + for ($_i860 = 0; $_i860 < $_size856; ++$_i860) { - $elem854 = null; - $elem854 = new \metastore\SQLForeignKey(); - $xfer += $elem854->read($input); - $this->foreignKeys []= $elem854; + $elem861 = null; + $elem861 = new \metastore\SQLForeignKey(); + $xfer += $elem861->read($input); + $this->foreignKeys []= $elem861; } $xfer += $input->readListEnd(); } else { @@ -16916,15 +17947,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size855 = 0; - $_etype858 = 0; - $xfer += $input->readListBegin($_etype858, $_size855); - for ($_i859 = 0; $_i859 < $_size855; ++$_i859) + $_size862 = 0; + $_etype865 = 0; + $xfer += $input->readListBegin($_etype865, $_size862); + for ($_i866 = 0; $_i866 < $_size862; ++$_i866) { - $elem860 = null; - $elem860 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem860->read($input); - $this->uniqueConstraints []= $elem860; + $elem867 = null; + $elem867 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem867->read($input); + $this->uniqueConstraints []= $elem867; } $xfer += $input->readListEnd(); } else { @@ -16934,15 +17965,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size861 = 0; - $_etype864 = 0; - $xfer += $input->readListBegin($_etype864, $_size861); - for ($_i865 = 0; $_i865 < $_size861; ++$_i865) + $_size868 = 0; + $_etype871 = 0; 
+ $xfer += $input->readListBegin($_etype871, $_size868); + for ($_i872 = 0; $_i872 < $_size868; ++$_i872) { - $elem866 = null; - $elem866 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem866->read($input); - $this->notNullConstraints []= $elem866; + $elem873 = null; + $elem873 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem873->read($input); + $this->notNullConstraints []= $elem873; } $xfer += $input->readListEnd(); } else { @@ -16952,15 +17983,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size867 = 0; - $_etype870 = 0; - $xfer += $input->readListBegin($_etype870, $_size867); - for ($_i871 = 0; $_i871 < $_size867; ++$_i871) + $_size874 = 0; + $_etype877 = 0; + $xfer += $input->readListBegin($_etype877, $_size874); + for ($_i878 = 0; $_i878 < $_size874; ++$_i878) { - $elem872 = null; - $elem872 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem872->read($input); - $this->defaultConstraints []= $elem872; + $elem879 = null; + $elem879 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem879->read($input); + $this->defaultConstraints []= $elem879; } $xfer += $input->readListEnd(); } else { @@ -16970,15 +18001,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size873 = 0; - $_etype876 = 0; - $xfer += $input->readListBegin($_etype876, $_size873); - for ($_i877 = 0; $_i877 < $_size873; ++$_i877) + $_size880 = 0; + $_etype883 = 0; + $xfer += $input->readListBegin($_etype883, $_size880); + for ($_i884 = 0; $_i884 < $_size880; ++$_i884) { - $elem878 = null; - $elem878 = new \metastore\SQLCheckConstraint(); - $xfer += $elem878->read($input); - $this->checkConstraints []= $elem878; + $elem885 = null; + $elem885 = new \metastore\SQLCheckConstraint(); + $xfer += $elem885->read($input); + $this->checkConstraints []= $elem885; } $xfer += $input->readListEnd(); } else { @@ -17014,9 +18045,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter879) + foreach ($this->primaryKeys as $iter886) { - $xfer += $iter879->write($output); + $xfer += $iter886->write($output); } } $output->writeListEnd(); @@ -17031,9 +18062,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter880) + foreach ($this->foreignKeys as $iter887) { - $xfer += $iter880->write($output); + $xfer += $iter887->write($output); } } $output->writeListEnd(); @@ -17048,9 +18079,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter881) + foreach ($this->uniqueConstraints as $iter888) { - $xfer += $iter881->write($output); + $xfer += $iter888->write($output); } } $output->writeListEnd(); @@ -17065,9 +18096,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter882) + foreach ($this->notNullConstraints as $iter889) { - $xfer += $iter882->write($output); + $xfer += $iter889->write($output); } } $output->writeListEnd(); @@ -17082,9 +18113,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { 
$output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter883) + foreach ($this->defaultConstraints as $iter890) { - $xfer += $iter883->write($output); + $xfer += $iter890->write($output); } } $output->writeListEnd(); @@ -17099,9 +18130,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter884) + foreach ($this->checkConstraints as $iter891) { - $xfer += $iter884->write($output); + $xfer += $iter891->write($output); } } $output->writeListEnd(); @@ -19101,14 +20132,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size885 = 0; - $_etype888 = 0; - $xfer += $input->readListBegin($_etype888, $_size885); - for ($_i889 = 0; $_i889 < $_size885; ++$_i889) + $_size892 = 0; + $_etype895 = 0; + $xfer += $input->readListBegin($_etype895, $_size892); + for ($_i896 = 0; $_i896 < $_size892; ++$_i896) { - $elem890 = null; - $xfer += $input->readString($elem890); - $this->partNames []= $elem890; + $elem897 = null; + $xfer += $input->readString($elem897); + $this->partNames []= $elem897; } $xfer += $input->readListEnd(); } else { @@ -19146,9 +20177,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter891) + foreach ($this->partNames as $iter898) { - $xfer += $output->writeString($iter891); + $xfer += $output->writeString($iter898); } } $output->writeListEnd(); @@ -19399,14 +20430,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size892 = 0; - $_etype895 = 0; - $xfer += $input->readListBegin($_etype895, $_size892); - for ($_i896 = 0; $_i896 < $_size892; ++$_i896) + $_size899 = 0; + $_etype902 = 0; + $xfer += $input->readListBegin($_etype902, $_size899); + for ($_i903 = 0; $_i903 < $_size899; ++$_i903) { - $elem897 = null; - $xfer += $input->readString($elem897); - $this->success []= $elem897; + $elem904 = null; + $xfer += $input->readString($elem904); + $this->success []= $elem904; } $xfer += $input->readListEnd(); } else { @@ -19442,9 +20473,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter898) + foreach ($this->success as $iter905) { - $xfer += $output->writeString($iter898); + $xfer += $output->writeString($iter905); } } $output->writeListEnd(); @@ -19646,207 +20677,6 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size899 = 0; - $_etype902 = 0; - $xfer += $input->readListBegin($_etype902, $_size899); - for ($_i903 = 0; $_i903 < $_size899; ++$_i903) - { - $elem904 = null; - $xfer += $input->readString($elem904); - $this->success []= $elem904; - } - $xfer += $input->readListEnd(); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_by_type_result'); 
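// Illustrative sketch, not part of the generated diff: every renumbered hunk in
// this file is the same Thrift-generated list (de)serialization pattern; only the
// compiler-assigned temporaries ($_size799, $_etype802, $_i803, $elem804, ...)
// change. Assuming the standard Thrift PHP runtime (TType, TProtocol), the read
// side of a list<string> field reduces to:
//
//   $size = 0; $etype = 0;
//   $xfer += $input->readListBegin($etype, $size);   // element type and count
//   for ($i = 0; $i < $size; ++$i) {
//     $elem = null;
//     $xfer += $input->readString($elem);            // one element per iteration
//     $this->success[] = $elem;
//   }
//   $xfer += $input->readListEnd();
//
// The uniform +7 shift in the suffixes presumably comes from new catalog-related
// methods generated earlier in the service file; it is mechanical and implies no
// behavioral change to the methods shown here.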
- if ($this->success !== null) { - if (!is_array($this->success)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('success', TType::LST, 0); - { - $output->writeListBegin(TType::STRING, count($this->success)); - { - foreach ($this->success as $iter905) - { - $xfer += $output->writeString($iter905); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_materialized_views_for_rewriting_args { - static $_TSPEC; - - /** - * @var string - */ - public $db_name = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'db_name', - 'type' => TType::STRING, - ), - ); - } - if (is_array($vals)) { - if (isset($vals['db_name'])) { - $this->db_name = $vals['db_name']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_args'); - if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); - $xfer += $output->writeString($this->db_name); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { - static $_TSPEC; - - /** - * @var string[] - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::LST, - 'etype' => TType::STRING, - 'elem' => array( - 'type' => TType::STRING, - ), - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_result'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::LST) { - $this->success = array(); $_size906 = 0; $_etype909 = 0; $xfer += 
$input->readListBegin($_etype909, $_size906); @@ -19881,7 +20711,7 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_by_type_result'); if ($this->success !== null) { if (!is_array($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -19911,6 +20741,207 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { } +class ThriftHiveMetastore_get_materialized_views_for_rewriting_args { + static $_TSPEC; + + /** + * @var string + */ + public $db_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size913 = 0; + $_etype916 = 0; + $xfer += $input->readListBegin($_etype916, $_size913); + for ($_i917 = 0; $_i917 < $_size913; ++$_i917) + { + $elem918 = null; + 
$xfer += $input->readString($elem918); + $this->success []= $elem918; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter919) + { + $xfer += $output->writeString($iter919); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_table_meta_args { static $_TSPEC; @@ -19997,14 +21028,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size913 = 0; - $_etype916 = 0; - $xfer += $input->readListBegin($_etype916, $_size913); - for ($_i917 = 0; $_i917 < $_size913; ++$_i917) + $_size920 = 0; + $_etype923 = 0; + $xfer += $input->readListBegin($_etype923, $_size920); + for ($_i924 = 0; $_i924 < $_size920; ++$_i924) { - $elem918 = null; - $xfer += $input->readString($elem918); - $this->tbl_types []= $elem918; + $elem925 = null; + $xfer += $input->readString($elem925); + $this->tbl_types []= $elem925; } $xfer += $input->readListEnd(); } else { @@ -20042,9 +21073,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter919) + foreach ($this->tbl_types as $iter926) { - $xfer += $output->writeString($iter919); + $xfer += $output->writeString($iter926); } } $output->writeListEnd(); @@ -20121,15 +21152,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size920 = 0; - $_etype923 = 0; - $xfer += $input->readListBegin($_etype923, $_size920); - for ($_i924 = 0; $_i924 < $_size920; ++$_i924) + $_size927 = 0; + $_etype930 = 0; + $xfer += $input->readListBegin($_etype930, $_size927); + for ($_i931 = 0; $_i931 < $_size927; ++$_i931) { - $elem925 = null; - $elem925 = new \metastore\TableMeta(); - $xfer += $elem925->read($input); - $this->success []= $elem925; + $elem932 = null; + $elem932 = new \metastore\TableMeta(); + $xfer += $elem932->read($input); + $this->success []= $elem932; } $xfer += $input->readListEnd(); } else { @@ -20165,9 +21196,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter926) + foreach ($this->success as $iter933) { - $xfer += $iter926->write($output); + $xfer += $iter933->write($output); } } $output->writeListEnd(); @@ -20323,14 +21354,14 @@ class 
ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size927 = 0; - $_etype930 = 0; - $xfer += $input->readListBegin($_etype930, $_size927); - for ($_i931 = 0; $_i931 < $_size927; ++$_i931) + $_size934 = 0; + $_etype937 = 0; + $xfer += $input->readListBegin($_etype937, $_size934); + for ($_i938 = 0; $_i938 < $_size934; ++$_i938) { - $elem932 = null; - $xfer += $input->readString($elem932); - $this->success []= $elem932; + $elem939 = null; + $xfer += $input->readString($elem939); + $this->success []= $elem939; } $xfer += $input->readListEnd(); } else { @@ -20366,9 +21397,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter933) + foreach ($this->success as $iter940) { - $xfer += $output->writeString($iter933); + $xfer += $output->writeString($iter940); } } $output->writeListEnd(); @@ -20683,14 +21714,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size934 = 0; - $_etype937 = 0; - $xfer += $input->readListBegin($_etype937, $_size934); - for ($_i938 = 0; $_i938 < $_size934; ++$_i938) + $_size941 = 0; + $_etype944 = 0; + $xfer += $input->readListBegin($_etype944, $_size941); + for ($_i945 = 0; $_i945 < $_size941; ++$_i945) { - $elem939 = null; - $xfer += $input->readString($elem939); - $this->tbl_names []= $elem939; + $elem946 = null; + $xfer += $input->readString($elem946); + $this->tbl_names []= $elem946; } $xfer += $input->readListEnd(); } else { @@ -20723,9 +21754,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter940) + foreach ($this->tbl_names as $iter947) { - $xfer += $output->writeString($iter940); + $xfer += $output->writeString($iter947); } } $output->writeListEnd(); @@ -20790,15 +21821,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size941 = 0; - $_etype944 = 0; - $xfer += $input->readListBegin($_etype944, $_size941); - for ($_i945 = 0; $_i945 < $_size941; ++$_i945) + $_size948 = 0; + $_etype951 = 0; + $xfer += $input->readListBegin($_etype951, $_size948); + for ($_i952 = 0; $_i952 < $_size948; ++$_i952) { - $elem946 = null; - $elem946 = new \metastore\Table(); - $xfer += $elem946->read($input); - $this->success []= $elem946; + $elem953 = null; + $elem953 = new \metastore\Table(); + $xfer += $elem953->read($input); + $this->success []= $elem953; } $xfer += $input->readListEnd(); } else { @@ -20826,9 +21857,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter947) + foreach ($this->success as $iter954) { - $xfer += $iter947->write($output); + $xfer += $iter954->write($output); } } $output->writeListEnd(); @@ -21355,14 +22386,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size948 = 0; - $_etype951 = 0; - $xfer += $input->readListBegin($_etype951, $_size948); - for ($_i952 = 0; $_i952 < $_size948; ++$_i952) + $_size955 = 0; + $_etype958 = 0; + $xfer += $input->readListBegin($_etype958, $_size955); + for ($_i959 = 0; $_i959 < $_size955; ++$_i959) { - $elem953 = null; - $xfer += $input->readString($elem953); - $this->tbl_names []= 
$elem953; + $elem960 = null; + $xfer += $input->readString($elem960); + $this->tbl_names []= $elem960; } $xfer += $input->readListEnd(); } else { @@ -21395,9 +22426,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter954) + foreach ($this->tbl_names as $iter961) { - $xfer += $output->writeString($iter954); + $xfer += $output->writeString($iter961); } } $output->writeListEnd(); @@ -21502,18 +22533,18 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size955 = 0; - $_ktype956 = 0; - $_vtype957 = 0; - $xfer += $input->readMapBegin($_ktype956, $_vtype957, $_size955); - for ($_i959 = 0; $_i959 < $_size955; ++$_i959) + $_size962 = 0; + $_ktype963 = 0; + $_vtype964 = 0; + $xfer += $input->readMapBegin($_ktype963, $_vtype964, $_size962); + for ($_i966 = 0; $_i966 < $_size962; ++$_i966) { - $key960 = ''; - $val961 = new \metastore\Materialization(); - $xfer += $input->readString($key960); - $val961 = new \metastore\Materialization(); - $xfer += $val961->read($input); - $this->success[$key960] = $val961; + $key967 = ''; + $val968 = new \metastore\Materialization(); + $xfer += $input->readString($key967); + $val968 = new \metastore\Materialization(); + $xfer += $val968->read($input); + $this->success[$key967] = $val968; } $xfer += $input->readMapEnd(); } else { @@ -21565,10 +22596,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter962 => $viter963) + foreach ($this->success as $kiter969 => $viter970) { - $xfer += $output->writeString($kiter962); - $xfer += $viter963->write($output); + $xfer += $output->writeString($kiter969); + $xfer += $viter970->write($output); } } $output->writeMapEnd(); @@ -21603,6 +22634,10 @@ class ThriftHiveMetastore_update_creation_metadata_args { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbname = null; /** * @var string @@ -21617,14 +22652,18 @@ class ThriftHiveMetastore_update_creation_metadata_args { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbname', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'tbl_name', + 'var' => 'dbname', 'type' => TType::STRING, ), 3 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 4 => array( 'var' => 'creation_metadata', 'type' => TType::STRUCT, 'class' => '\metastore\CreationMetadata', @@ -21632,6 +22671,9 @@ class ThriftHiveMetastore_update_creation_metadata_args { ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbname'])) { $this->dbname = $vals['dbname']; } @@ -21665,19 +22707,26 @@ class ThriftHiveMetastore_update_creation_metadata_args { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbname); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tbl_name); + $xfer += $input->readString($this->dbname); } else { $xfer += $input->skip($ftype); } break; case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::STRUCT) { 
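// Descriptive note on the renumbering above (an inference from the hunk, not a
// statement from the patch author): catName is prepended as field 1, so the
// existing fields shift, dbname 1->2, tbl_name 2->3, creation_metadata 3->4.
// Reusing field id 1 for a different member is normally avoided in Thrift,
// since an old client still sending dbname as field 1 would have that value
// read into catName here (both are strings); presumably this is acceptable
// because the metastore server and its generated clients ship together.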
$this->creation_metadata = new \metastore\CreationMetadata(); $xfer += $this->creation_metadata->read($input); @@ -21698,13 +22747,18 @@ class ThriftHiveMetastore_update_creation_metadata_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_args'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbname !== null) { - $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 2); $xfer += $output->writeString($this->dbname); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -21712,7 +22766,7 @@ class ThriftHiveMetastore_update_creation_metadata_args { if (!is_object($this->creation_metadata)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 3); + $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 4); $xfer += $this->creation_metadata->write($output); $xfer += $output->writeFieldEnd(); } @@ -22057,14 +23111,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size964 = 0; - $_etype967 = 0; - $xfer += $input->readListBegin($_etype967, $_size964); - for ($_i968 = 0; $_i968 < $_size964; ++$_i968) + $_size971 = 0; + $_etype974 = 0; + $xfer += $input->readListBegin($_etype974, $_size971); + for ($_i975 = 0; $_i975 < $_size971; ++$_i975) { - $elem969 = null; - $xfer += $input->readString($elem969); - $this->success []= $elem969; + $elem976 = null; + $xfer += $input->readString($elem976); + $this->success []= $elem976; } $xfer += $input->readListEnd(); } else { @@ -22116,9 +23170,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter970) + foreach ($this->success as $iter977) { - $xfer += $output->writeString($iter970); + $xfer += $output->writeString($iter977); } } $output->writeListEnd(); @@ -23431,15 +24485,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size971 = 0; - $_etype974 = 0; - $xfer += $input->readListBegin($_etype974, $_size971); - for ($_i975 = 0; $_i975 < $_size971; ++$_i975) + $_size978 = 0; + $_etype981 = 0; + $xfer += $input->readListBegin($_etype981, $_size978); + for ($_i982 = 0; $_i982 < $_size978; ++$_i982) { - $elem976 = null; - $elem976 = new \metastore\Partition(); - $xfer += $elem976->read($input); - $this->new_parts []= $elem976; + $elem983 = null; + $elem983 = new \metastore\Partition(); + $xfer += $elem983->read($input); + $this->new_parts []= $elem983; } $xfer += $input->readListEnd(); } else { @@ -23467,9 +24521,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter977) + foreach ($this->new_parts as $iter984) { - $xfer += $iter977->write($output); + $xfer += $iter984->write($output); } } $output->writeListEnd(); @@ -23684,15 +24738,15 @@ class 
ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size978 = 0; - $_etype981 = 0; - $xfer += $input->readListBegin($_etype981, $_size978); - for ($_i982 = 0; $_i982 < $_size978; ++$_i982) + $_size985 = 0; + $_etype988 = 0; + $xfer += $input->readListBegin($_etype988, $_size985); + for ($_i989 = 0; $_i989 < $_size985; ++$_i989) { - $elem983 = null; - $elem983 = new \metastore\PartitionSpec(); - $xfer += $elem983->read($input); - $this->new_parts []= $elem983; + $elem990 = null; + $elem990 = new \metastore\PartitionSpec(); + $xfer += $elem990->read($input); + $this->new_parts []= $elem990; } $xfer += $input->readListEnd(); } else { @@ -23720,9 +24774,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter984) + foreach ($this->new_parts as $iter991) { - $xfer += $iter984->write($output); + $xfer += $iter991->write($output); } } $output->writeListEnd(); @@ -23972,14 +25026,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size985 = 0; - $_etype988 = 0; - $xfer += $input->readListBegin($_etype988, $_size985); - for ($_i989 = 0; $_i989 < $_size985; ++$_i989) + $_size992 = 0; + $_etype995 = 0; + $xfer += $input->readListBegin($_etype995, $_size992); + for ($_i996 = 0; $_i996 < $_size992; ++$_i996) { - $elem990 = null; - $xfer += $input->readString($elem990); - $this->part_vals []= $elem990; + $elem997 = null; + $xfer += $input->readString($elem997); + $this->part_vals []= $elem997; } $xfer += $input->readListEnd(); } else { @@ -24017,9 +25071,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter991) + foreach ($this->part_vals as $iter998) { - $xfer += $output->writeString($iter991); + $xfer += $output->writeString($iter998); } } $output->writeListEnd(); @@ -24521,14 +25575,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size992 = 0; - $_etype995 = 0; - $xfer += $input->readListBegin($_etype995, $_size992); - for ($_i996 = 0; $_i996 < $_size992; ++$_i996) + $_size999 = 0; + $_etype1002 = 0; + $xfer += $input->readListBegin($_etype1002, $_size999); + for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) { - $elem997 = null; - $xfer += $input->readString($elem997); - $this->part_vals []= $elem997; + $elem1004 = null; + $xfer += $input->readString($elem1004); + $this->part_vals []= $elem1004; } $xfer += $input->readListEnd(); } else { @@ -24574,9 +25628,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter998) + foreach ($this->part_vals as $iter1005) { - $xfer += $output->writeString($iter998); + $xfer += $output->writeString($iter1005); } } $output->writeListEnd(); @@ -25430,14 +26484,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size999 = 0; - $_etype1002 = 0; - $xfer += $input->readListBegin($_etype1002, $_size999); - for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) + $_size1006 = 0; + $_etype1009 = 0; + $xfer += $input->readListBegin($_etype1009, $_size1006); + for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) { - $elem1004 = null; - $xfer += 
$input->readString($elem1004); - $this->part_vals []= $elem1004; + $elem1011 = null; + $xfer += $input->readString($elem1011); + $this->part_vals []= $elem1011; } $xfer += $input->readListEnd(); } else { @@ -25482,9 +26536,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1005) + foreach ($this->part_vals as $iter1012) { - $xfer += $output->writeString($iter1005); + $xfer += $output->writeString($iter1012); } } $output->writeListEnd(); @@ -25737,14 +26791,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1006 = 0; - $_etype1009 = 0; - $xfer += $input->readListBegin($_etype1009, $_size1006); - for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) + $_size1013 = 0; + $_etype1016 = 0; + $xfer += $input->readListBegin($_etype1016, $_size1013); + for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) { - $elem1011 = null; - $xfer += $input->readString($elem1011); - $this->part_vals []= $elem1011; + $elem1018 = null; + $xfer += $input->readString($elem1018); + $this->part_vals []= $elem1018; } $xfer += $input->readListEnd(); } else { @@ -25797,9 +26851,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1012) + foreach ($this->part_vals as $iter1019) { - $xfer += $output->writeString($iter1012); + $xfer += $output->writeString($iter1019); } } $output->writeListEnd(); @@ -26813,14 +27867,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1013 = 0; - $_etype1016 = 0; - $xfer += $input->readListBegin($_etype1016, $_size1013); - for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017) + $_size1020 = 0; + $_etype1023 = 0; + $xfer += $input->readListBegin($_etype1023, $_size1020); + for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) { - $elem1018 = null; - $xfer += $input->readString($elem1018); - $this->part_vals []= $elem1018; + $elem1025 = null; + $xfer += $input->readString($elem1025); + $this->part_vals []= $elem1025; } $xfer += $input->readListEnd(); } else { @@ -26858,9 +27912,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1019) + foreach ($this->part_vals as $iter1026) { - $xfer += $output->writeString($iter1019); + $xfer += $output->writeString($iter1026); } } $output->writeListEnd(); @@ -27102,17 +28156,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1020 = 0; - $_ktype1021 = 0; - $_vtype1022 = 0; - $xfer += $input->readMapBegin($_ktype1021, $_vtype1022, $_size1020); - for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024) + $_size1027 = 0; + $_ktype1028 = 0; + $_vtype1029 = 0; + $xfer += $input->readMapBegin($_ktype1028, $_vtype1029, $_size1027); + for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031) { - $key1025 = ''; - $val1026 = ''; - $xfer += $input->readString($key1025); - $xfer += $input->readString($val1026); - $this->partitionSpecs[$key1025] = $val1026; + $key1032 = ''; + $val1033 = ''; + $xfer += $input->readString($key1032); + $xfer += $input->readString($val1033); + $this->partitionSpecs[$key1032] = $val1033; } $xfer += $input->readMapEnd(); } else { @@ -27168,10 +28222,10 @@ class 
ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1027 => $viter1028) + foreach ($this->partitionSpecs as $kiter1034 => $viter1035) { - $xfer += $output->writeString($kiter1027); - $xfer += $output->writeString($viter1028); + $xfer += $output->writeString($kiter1034); + $xfer += $output->writeString($viter1035); } } $output->writeMapEnd(); @@ -27483,17 +28537,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1029 = 0; - $_ktype1030 = 0; - $_vtype1031 = 0; - $xfer += $input->readMapBegin($_ktype1030, $_vtype1031, $_size1029); - for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) + $_size1036 = 0; + $_ktype1037 = 0; + $_vtype1038 = 0; + $xfer += $input->readMapBegin($_ktype1037, $_vtype1038, $_size1036); + for ($_i1040 = 0; $_i1040 < $_size1036; ++$_i1040) { - $key1034 = ''; - $val1035 = ''; - $xfer += $input->readString($key1034); - $xfer += $input->readString($val1035); - $this->partitionSpecs[$key1034] = $val1035; + $key1041 = ''; + $val1042 = ''; + $xfer += $input->readString($key1041); + $xfer += $input->readString($val1042); + $this->partitionSpecs[$key1041] = $val1042; } $xfer += $input->readMapEnd(); } else { @@ -27549,10 +28603,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1036 => $viter1037) + foreach ($this->partitionSpecs as $kiter1043 => $viter1044) { - $xfer += $output->writeString($kiter1036); - $xfer += $output->writeString($viter1037); + $xfer += $output->writeString($kiter1043); + $xfer += $output->writeString($viter1044); } } $output->writeMapEnd(); @@ -27685,15 +28739,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1038 = 0; - $_etype1041 = 0; - $xfer += $input->readListBegin($_etype1041, $_size1038); - for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042) + $_size1045 = 0; + $_etype1048 = 0; + $xfer += $input->readListBegin($_etype1048, $_size1045); + for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) { - $elem1043 = null; - $elem1043 = new \metastore\Partition(); - $xfer += $elem1043->read($input); - $this->success []= $elem1043; + $elem1050 = null; + $elem1050 = new \metastore\Partition(); + $xfer += $elem1050->read($input); + $this->success []= $elem1050; } $xfer += $input->readListEnd(); } else { @@ -27753,9 +28807,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1044) + foreach ($this->success as $iter1051) { - $xfer += $iter1044->write($output); + $xfer += $iter1051->write($output); } } $output->writeListEnd(); @@ -27901,14 +28955,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1052 = 0; + $_etype1055 = 0; + $xfer += $input->readListBegin($_etype1055, $_size1052); + for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) { - $elem1050 = null; - $xfer += $input->readString($elem1050); - $this->part_vals []= $elem1050; + $elem1057 = null; + $xfer += $input->readString($elem1057); + $this->part_vals []= 
$elem1057; } $xfer += $input->readListEnd(); } else { @@ -27925,14 +28979,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1051 = 0; - $_etype1054 = 0; - $xfer += $input->readListBegin($_etype1054, $_size1051); - for ($_i1055 = 0; $_i1055 < $_size1051; ++$_i1055) + $_size1058 = 0; + $_etype1061 = 0; + $xfer += $input->readListBegin($_etype1061, $_size1058); + for ($_i1062 = 0; $_i1062 < $_size1058; ++$_i1062) { - $elem1056 = null; - $xfer += $input->readString($elem1056); - $this->group_names []= $elem1056; + $elem1063 = null; + $xfer += $input->readString($elem1063); + $this->group_names []= $elem1063; } $xfer += $input->readListEnd(); } else { @@ -27970,9 +29024,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1057) + foreach ($this->part_vals as $iter1064) { - $xfer += $output->writeString($iter1057); + $xfer += $output->writeString($iter1064); } } $output->writeListEnd(); @@ -27992,9 +29046,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1058) + foreach ($this->group_names as $iter1065) { - $xfer += $output->writeString($iter1058); + $xfer += $output->writeString($iter1065); } } $output->writeListEnd(); @@ -28585,15 +29639,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) + $_size1066 = 0; + $_etype1069 = 0; + $xfer += $input->readListBegin($_etype1069, $_size1066); + for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) { - $elem1064 = null; - $elem1064 = new \metastore\Partition(); - $xfer += $elem1064->read($input); - $this->success []= $elem1064; + $elem1071 = null; + $elem1071 = new \metastore\Partition(); + $xfer += $elem1071->read($input); + $this->success []= $elem1071; } $xfer += $input->readListEnd(); } else { @@ -28637,9 +29691,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1065) + foreach ($this->success as $iter1072) { - $xfer += $iter1065->write($output); + $xfer += $iter1072->write($output); } } $output->writeListEnd(); @@ -28785,14 +29839,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1066 = 0; - $_etype1069 = 0; - $xfer += $input->readListBegin($_etype1069, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1071 = null; - $xfer += $input->readString($elem1071); - $this->group_names []= $elem1071; + $elem1078 = null; + $xfer += $input->readString($elem1078); + $this->group_names []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -28840,9 +29894,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1072) + foreach ($this->group_names as $iter1079) { - $xfer += $output->writeString($iter1072); + $xfer += $output->writeString($iter1079); } } 
$output->writeListEnd(); @@ -28931,15 +29985,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1073 = 0; - $_etype1076 = 0; - $xfer += $input->readListBegin($_etype1076, $_size1073); - for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1078 = null; - $elem1078 = new \metastore\Partition(); - $xfer += $elem1078->read($input); - $this->success []= $elem1078; + $elem1085 = null; + $elem1085 = new \metastore\Partition(); + $xfer += $elem1085->read($input); + $this->success []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -28983,9 +30037,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1079) + foreach ($this->success as $iter1086) { - $xfer += $iter1079->write($output); + $xfer += $iter1086->write($output); } } $output->writeListEnd(); @@ -29205,15 +30259,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1080 = 0; - $_etype1083 = 0; - $xfer += $input->readListBegin($_etype1083, $_size1080); - for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) + $_size1087 = 0; + $_etype1090 = 0; + $xfer += $input->readListBegin($_etype1090, $_size1087); + for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) { - $elem1085 = null; - $elem1085 = new \metastore\PartitionSpec(); - $xfer += $elem1085->read($input); - $this->success []= $elem1085; + $elem1092 = null; + $elem1092 = new \metastore\PartitionSpec(); + $xfer += $elem1092->read($input); + $this->success []= $elem1092; } $xfer += $input->readListEnd(); } else { @@ -29257,9 +30311,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1086) + foreach ($this->success as $iter1093) { - $xfer += $iter1086->write($output); + $xfer += $iter1093->write($output); } } $output->writeListEnd(); @@ -29478,14 +30532,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1087 = 0; - $_etype1090 = 0; - $xfer += $input->readListBegin($_etype1090, $_size1087); - for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) + $_size1094 = 0; + $_etype1097 = 0; + $xfer += $input->readListBegin($_etype1097, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $elem1092 = null; - $xfer += $input->readString($elem1092); - $this->success []= $elem1092; + $elem1099 = null; + $xfer += $input->readString($elem1099); + $this->success []= $elem1099; } $xfer += $input->readListEnd(); } else { @@ -29529,9 +30583,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1093) + foreach ($this->success as $iter1100) { - $xfer += $output->writeString($iter1093); + $xfer += $output->writeString($iter1100); } } $output->writeListEnd(); @@ -29862,14 +30916,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1094 = 0; - $_etype1097 = 0; - $xfer += $input->readListBegin($_etype1097, $_size1094); - for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) + $_size1101 = 0; + $_etype1104 = 0; + 
$xfer += $input->readListBegin($_etype1104, $_size1101); + for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) { - $elem1099 = null; - $xfer += $input->readString($elem1099); - $this->part_vals []= $elem1099; + $elem1106 = null; + $xfer += $input->readString($elem1106); + $this->part_vals []= $elem1106; } $xfer += $input->readListEnd(); } else { @@ -29914,9 +30968,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1100) + foreach ($this->part_vals as $iter1107) { - $xfer += $output->writeString($iter1100); + $xfer += $output->writeString($iter1107); } } $output->writeListEnd(); @@ -30010,15 +31064,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1101 = 0; - $_etype1104 = 0; - $xfer += $input->readListBegin($_etype1104, $_size1101); - for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) + $_size1108 = 0; + $_etype1111 = 0; + $xfer += $input->readListBegin($_etype1111, $_size1108); + for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) { - $elem1106 = null; - $elem1106 = new \metastore\Partition(); - $xfer += $elem1106->read($input); - $this->success []= $elem1106; + $elem1113 = null; + $elem1113 = new \metastore\Partition(); + $xfer += $elem1113->read($input); + $this->success []= $elem1113; } $xfer += $input->readListEnd(); } else { @@ -30062,9 +31116,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1107) + foreach ($this->success as $iter1114) { - $xfer += $iter1107->write($output); + $xfer += $iter1114->write($output); } } $output->writeListEnd(); @@ -30211,14 +31265,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1108 = 0; - $_etype1111 = 0; - $xfer += $input->readListBegin($_etype1111, $_size1108); - for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) + $_size1115 = 0; + $_etype1118 = 0; + $xfer += $input->readListBegin($_etype1118, $_size1115); + for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) { - $elem1113 = null; - $xfer += $input->readString($elem1113); - $this->part_vals []= $elem1113; + $elem1120 = null; + $xfer += $input->readString($elem1120); + $this->part_vals []= $elem1120; } $xfer += $input->readListEnd(); } else { @@ -30242,14 +31296,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1114 = 0; - $_etype1117 = 0; - $xfer += $input->readListBegin($_etype1117, $_size1114); - for ($_i1118 = 0; $_i1118 < $_size1114; ++$_i1118) + $_size1121 = 0; + $_etype1124 = 0; + $xfer += $input->readListBegin($_etype1124, $_size1121); + for ($_i1125 = 0; $_i1125 < $_size1121; ++$_i1125) { - $elem1119 = null; - $xfer += $input->readString($elem1119); - $this->group_names []= $elem1119; + $elem1126 = null; + $xfer += $input->readString($elem1126); + $this->group_names []= $elem1126; } $xfer += $input->readListEnd(); } else { @@ -30287,9 +31341,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1120) + foreach ($this->part_vals as $iter1127) { - $xfer += $output->writeString($iter1120); + $xfer += $output->writeString($iter1127); } } $output->writeListEnd(); @@ -30314,9 +31368,9 @@ class 
ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1121) + foreach ($this->group_names as $iter1128) { - $xfer += $output->writeString($iter1121); + $xfer += $output->writeString($iter1128); } } $output->writeListEnd(); @@ -30405,15 +31459,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $elem1127 = new \metastore\Partition(); - $xfer += $elem1127->read($input); - $this->success []= $elem1127; + $elem1134 = null; + $elem1134 = new \metastore\Partition(); + $xfer += $elem1134->read($input); + $this->success []= $elem1134; } $xfer += $input->readListEnd(); } else { @@ -30457,9 +31511,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1128) + foreach ($this->success as $iter1135) { - $xfer += $iter1128->write($output); + $xfer += $iter1135->write($output); } } $output->writeListEnd(); @@ -30580,14 +31634,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $xfer += $input->readString($elem1134); - $this->part_vals []= $elem1134; + $elem1141 = null; + $xfer += $input->readString($elem1141); + $this->part_vals []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -30632,9 +31686,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1135) + foreach ($this->part_vals as $iter1142) { - $xfer += $output->writeString($iter1135); + $xfer += $output->writeString($iter1142); } } $output->writeListEnd(); @@ -30727,14 +31781,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += $input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $xfer += $input->readString($elem1141); - $this->success []= $elem1141; + $elem1148 = null; + $xfer += $input->readString($elem1148); + $this->success []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -30778,9 +31832,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1142) + foreach ($this->success as $iter1149) { - $xfer += $output->writeString($iter1142); + $xfer += $output->writeString($iter1149); } } $output->writeListEnd(); @@ -31023,15 +32077,15 @@ class 
ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1143 = 0; - $_etype1146 = 0; - $xfer += $input->readListBegin($_etype1146, $_size1143); - for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) + $_size1150 = 0; + $_etype1153 = 0; + $xfer += $input->readListBegin($_etype1153, $_size1150); + for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) { - $elem1148 = null; - $elem1148 = new \metastore\Partition(); - $xfer += $elem1148->read($input); - $this->success []= $elem1148; + $elem1155 = null; + $elem1155 = new \metastore\Partition(); + $xfer += $elem1155->read($input); + $this->success []= $elem1155; } $xfer += $input->readListEnd(); } else { @@ -31075,9 +32129,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1149) + foreach ($this->success as $iter1156) { - $xfer += $iter1149->write($output); + $xfer += $iter1156->write($output); } } $output->writeListEnd(); @@ -31320,15 +32374,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $elem1155 = new \metastore\PartitionSpec(); - $xfer += $elem1155->read($input); - $this->success []= $elem1155; + $elem1162 = null; + $elem1162 = new \metastore\PartitionSpec(); + $xfer += $elem1162->read($input); + $this->success []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -31372,9 +32426,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1156) + foreach ($this->success as $iter1163) { - $xfer += $iter1156->write($output); + $xfer += $iter1163->write($output); } } $output->writeListEnd(); @@ -31940,14 +32994,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $xfer += $input->readString($elem1162); - $this->names []= $elem1162; + $elem1169 = null; + $xfer += $input->readString($elem1169); + $this->names []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -31985,9 +33039,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1163) + foreach ($this->names as $iter1170) { - $xfer += $output->writeString($iter1163); + $xfer += $output->writeString($iter1170); } } $output->writeListEnd(); @@ -32076,15 +33130,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, 
$_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $elem1169 = new \metastore\Partition(); - $xfer += $elem1169->read($input); - $this->success []= $elem1169; + $elem1176 = null; + $elem1176 = new \metastore\Partition(); + $xfer += $elem1176->read($input); + $this->success []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -32128,9 +33182,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1170) + foreach ($this->success as $iter1177) { - $xfer += $iter1170->write($output); + $xfer += $iter1177->write($output); } } $output->writeListEnd(); @@ -32469,15 +33523,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $elem1176 = new \metastore\Partition(); - $xfer += $elem1176->read($input); - $this->new_parts []= $elem1176; + $elem1183 = null; + $elem1183 = new \metastore\Partition(); + $xfer += $elem1183->read($input); + $this->new_parts []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -32515,9 +33569,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1177) + foreach ($this->new_parts as $iter1184) { - $xfer += $iter1177->write($output); + $xfer += $iter1184->write($output); } } $output->writeListEnd(); @@ -32732,15 +33786,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $elem1183 = new \metastore\Partition(); - $xfer += $elem1183->read($input); - $this->new_parts []= $elem1183; + $elem1190 = null; + $elem1190 = new \metastore\Partition(); + $xfer += $elem1190->read($input); + $this->new_parts []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -32786,9 +33840,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1184) + foreach ($this->new_parts as $iter1191) { - $xfer += $iter1184->write($output); + $xfer += $iter1191->write($output); } } $output->writeListEnd(); @@ -33266,14 +34320,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1185 = 0; - $_etype1188 = 0; - $xfer += $input->readListBegin($_etype1188, $_size1185); - for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) + $_size1192 = 0; + $_etype1195 = 0; + $xfer += $input->readListBegin($_etype1195, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $elem1190 = null; - $xfer += $input->readString($elem1190); - $this->part_vals []= $elem1190; + $elem1197 = null; + $xfer += $input->readString($elem1197); + $this->part_vals []= $elem1197; } 
$xfer += $input->readListEnd(); } else { @@ -33319,9 +34373,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1191) + foreach ($this->part_vals as $iter1198) { - $xfer += $output->writeString($iter1191); + $xfer += $output->writeString($iter1198); } } $output->writeListEnd(); @@ -33506,14 +34560,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1192 = 0; - $_etype1195 = 0; - $xfer += $input->readListBegin($_etype1195, $_size1192); - for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) + $_size1199 = 0; + $_etype1202 = 0; + $xfer += $input->readListBegin($_etype1202, $_size1199); + for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) { - $elem1197 = null; - $xfer += $input->readString($elem1197); - $this->part_vals []= $elem1197; + $elem1204 = null; + $xfer += $input->readString($elem1204); + $this->part_vals []= $elem1204; } $xfer += $input->readListEnd(); } else { @@ -33548,9 +34602,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1198) + foreach ($this->part_vals as $iter1205) { - $xfer += $output->writeString($iter1198); + $xfer += $output->writeString($iter1205); } } $output->writeListEnd(); @@ -34004,14 +35058,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1199 = 0; - $_etype1202 = 0; - $xfer += $input->readListBegin($_etype1202, $_size1199); - for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203) + $_size1206 = 0; + $_etype1209 = 0; + $xfer += $input->readListBegin($_etype1209, $_size1206); + for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) { - $elem1204 = null; - $xfer += $input->readString($elem1204); - $this->success []= $elem1204; + $elem1211 = null; + $xfer += $input->readString($elem1211); + $this->success []= $elem1211; } $xfer += $input->readListEnd(); } else { @@ -34047,9 +35101,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1205) + foreach ($this->success as $iter1212) { - $xfer += $output->writeString($iter1205); + $xfer += $output->writeString($iter1212); } } $output->writeListEnd(); @@ -34209,17 +35263,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1206 = 0; - $_ktype1207 = 0; - $_vtype1208 = 0; - $xfer += $input->readMapBegin($_ktype1207, $_vtype1208, $_size1206); - for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210) + $_size1213 = 0; + $_ktype1214 = 0; + $_vtype1215 = 0; + $xfer += $input->readMapBegin($_ktype1214, $_vtype1215, $_size1213); + for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217) { - $key1211 = ''; - $val1212 = ''; - $xfer += $input->readString($key1211); - $xfer += $input->readString($val1212); - $this->success[$key1211] = $val1212; + $key1218 = ''; + $val1219 = ''; + $xfer += $input->readString($key1218); + $xfer += $input->readString($val1219); + $this->success[$key1218] = $val1219; } $xfer += $input->readMapEnd(); } else { @@ -34255,10 +35309,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1213 => $viter1214) + 
foreach ($this->success as $kiter1220 => $viter1221) { - $xfer += $output->writeString($kiter1213); - $xfer += $output->writeString($viter1214); + $xfer += $output->writeString($kiter1220); + $xfer += $output->writeString($viter1221); } } $output->writeMapEnd(); @@ -34378,17 +35432,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1215 = 0; - $_ktype1216 = 0; - $_vtype1217 = 0; - $xfer += $input->readMapBegin($_ktype1216, $_vtype1217, $_size1215); - for ($_i1219 = 0; $_i1219 < $_size1215; ++$_i1219) + $_size1222 = 0; + $_ktype1223 = 0; + $_vtype1224 = 0; + $xfer += $input->readMapBegin($_ktype1223, $_vtype1224, $_size1222); + for ($_i1226 = 0; $_i1226 < $_size1222; ++$_i1226) { - $key1220 = ''; - $val1221 = ''; - $xfer += $input->readString($key1220); - $xfer += $input->readString($val1221); - $this->part_vals[$key1220] = $val1221; + $key1227 = ''; + $val1228 = ''; + $xfer += $input->readString($key1227); + $xfer += $input->readString($val1228); + $this->part_vals[$key1227] = $val1228; } $xfer += $input->readMapEnd(); } else { @@ -34433,10 +35487,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1222 => $viter1223) + foreach ($this->part_vals as $kiter1229 => $viter1230) { - $xfer += $output->writeString($kiter1222); - $xfer += $output->writeString($viter1223); + $xfer += $output->writeString($kiter1229); + $xfer += $output->writeString($viter1230); } } $output->writeMapEnd(); @@ -34758,17 +35812,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1224 = 0; - $_ktype1225 = 0; - $_vtype1226 = 0; - $xfer += $input->readMapBegin($_ktype1225, $_vtype1226, $_size1224); - for ($_i1228 = 0; $_i1228 < $_size1224; ++$_i1228) + $_size1231 = 0; + $_ktype1232 = 0; + $_vtype1233 = 0; + $xfer += $input->readMapBegin($_ktype1232, $_vtype1233, $_size1231); + for ($_i1235 = 0; $_i1235 < $_size1231; ++$_i1235) { - $key1229 = ''; - $val1230 = ''; - $xfer += $input->readString($key1229); - $xfer += $input->readString($val1230); - $this->part_vals[$key1229] = $val1230; + $key1236 = ''; + $val1237 = ''; + $xfer += $input->readString($key1236); + $xfer += $input->readString($val1237); + $this->part_vals[$key1236] = $val1237; } $xfer += $input->readMapEnd(); } else { @@ -34813,10 +35867,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1231 => $viter1232) + foreach ($this->part_vals as $kiter1238 => $viter1239) { - $xfer += $output->writeString($kiter1231); - $xfer += $output->writeString($viter1232); + $xfer += $output->writeString($kiter1238); + $xfer += $output->writeString($viter1239); } } $output->writeMapEnd(); @@ -39775,14 +40829,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1233 = 0; - $_etype1236 = 0; - $xfer += $input->readListBegin($_etype1236, $_size1233); - for ($_i1237 = 0; $_i1237 < $_size1233; ++$_i1237) + $_size1240 = 0; + $_etype1243 = 0; + $xfer += $input->readListBegin($_etype1243, $_size1240); + for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) { - $elem1238 = null; - $xfer += $input->readString($elem1238); - $this->success []= $elem1238; + $elem1245 = null; + $xfer += 
$input->readString($elem1245); + $this->success []= $elem1245; } $xfer += $input->readListEnd(); } else { @@ -39818,9 +40872,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1239) + foreach ($this->success as $iter1246) { - $xfer += $output->writeString($iter1239); + $xfer += $output->writeString($iter1246); } } $output->writeListEnd(); @@ -40689,14 +41743,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1240 = 0; - $_etype1243 = 0; - $xfer += $input->readListBegin($_etype1243, $_size1240); - for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) + $_size1247 = 0; + $_etype1250 = 0; + $xfer += $input->readListBegin($_etype1250, $_size1247); + for ($_i1251 = 0; $_i1251 < $_size1247; ++$_i1251) { - $elem1245 = null; - $xfer += $input->readString($elem1245); - $this->success []= $elem1245; + $elem1252 = null; + $xfer += $input->readString($elem1252); + $this->success []= $elem1252; } $xfer += $input->readListEnd(); } else { @@ -40732,9 +41786,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1246) + foreach ($this->success as $iter1253) { - $xfer += $output->writeString($iter1246); + $xfer += $output->writeString($iter1253); } } $output->writeListEnd(); @@ -41425,15 +42479,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1247 = 0; - $_etype1250 = 0; - $xfer += $input->readListBegin($_etype1250, $_size1247); - for ($_i1251 = 0; $_i1251 < $_size1247; ++$_i1251) + $_size1254 = 0; + $_etype1257 = 0; + $xfer += $input->readListBegin($_etype1257, $_size1254); + for ($_i1258 = 0; $_i1258 < $_size1254; ++$_i1258) { - $elem1252 = null; - $elem1252 = new \metastore\Role(); - $xfer += $elem1252->read($input); - $this->success []= $elem1252; + $elem1259 = null; + $elem1259 = new \metastore\Role(); + $xfer += $elem1259->read($input); + $this->success []= $elem1259; } $xfer += $input->readListEnd(); } else { @@ -41469,9 +42523,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1253) + foreach ($this->success as $iter1260) { - $xfer += $iter1253->write($output); + $xfer += $iter1260->write($output); } } $output->writeListEnd(); @@ -42133,14 +43187,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1254 = 0; - $_etype1257 = 0; - $xfer += $input->readListBegin($_etype1257, $_size1254); - for ($_i1258 = 0; $_i1258 < $_size1254; ++$_i1258) + $_size1261 = 0; + $_etype1264 = 0; + $xfer += $input->readListBegin($_etype1264, $_size1261); + for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) { - $elem1259 = null; - $xfer += $input->readString($elem1259); - $this->group_names []= $elem1259; + $elem1266 = null; + $xfer += $input->readString($elem1266); + $this->group_names []= $elem1266; } $xfer += $input->readListEnd(); } else { @@ -42181,9 +43235,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1260) + foreach ($this->group_names as $iter1267) { - $xfer += $output->writeString($iter1260); + $xfer += $output->writeString($iter1267); } } $output->writeListEnd(); @@ -42491,15 
+43545,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1261 = 0; - $_etype1264 = 0; - $xfer += $input->readListBegin($_etype1264, $_size1261); - for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) + $_size1268 = 0; + $_etype1271 = 0; + $xfer += $input->readListBegin($_etype1271, $_size1268); + for ($_i1272 = 0; $_i1272 < $_size1268; ++$_i1272) { - $elem1266 = null; - $elem1266 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1266->read($input); - $this->success []= $elem1266; + $elem1273 = null; + $elem1273 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1273->read($input); + $this->success []= $elem1273; } $xfer += $input->readListEnd(); } else { @@ -42535,9 +43589,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1267) + foreach ($this->success as $iter1274) { - $xfer += $iter1267->write($output); + $xfer += $iter1274->write($output); } } $output->writeListEnd(); @@ -43169,14 +44223,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1268 = 0; - $_etype1271 = 0; - $xfer += $input->readListBegin($_etype1271, $_size1268); - for ($_i1272 = 0; $_i1272 < $_size1268; ++$_i1272) + $_size1275 = 0; + $_etype1278 = 0; + $xfer += $input->readListBegin($_etype1278, $_size1275); + for ($_i1279 = 0; $_i1279 < $_size1275; ++$_i1279) { - $elem1273 = null; - $xfer += $input->readString($elem1273); - $this->group_names []= $elem1273; + $elem1280 = null; + $xfer += $input->readString($elem1280); + $this->group_names []= $elem1280; } $xfer += $input->readListEnd(); } else { @@ -43209,9 +44263,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1274) + foreach ($this->group_names as $iter1281) { - $xfer += $output->writeString($iter1274); + $xfer += $output->writeString($iter1281); } } $output->writeListEnd(); @@ -43287,14 +44341,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1275 = 0; - $_etype1278 = 0; - $xfer += $input->readListBegin($_etype1278, $_size1275); - for ($_i1279 = 0; $_i1279 < $_size1275; ++$_i1279) + $_size1282 = 0; + $_etype1285 = 0; + $xfer += $input->readListBegin($_etype1285, $_size1282); + for ($_i1286 = 0; $_i1286 < $_size1282; ++$_i1286) { - $elem1280 = null; - $xfer += $input->readString($elem1280); - $this->success []= $elem1280; + $elem1287 = null; + $xfer += $input->readString($elem1287); + $this->success []= $elem1287; } $xfer += $input->readListEnd(); } else { @@ -43330,9 +44384,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1281) + foreach ($this->success as $iter1288) { - $xfer += $output->writeString($iter1281); + $xfer += $output->writeString($iter1288); } } $output->writeListEnd(); @@ -44449,14 +45503,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1282 = 0; - $_etype1285 = 0; - $xfer += $input->readListBegin($_etype1285, $_size1282); - for ($_i1286 = 0; $_i1286 < $_size1282; ++$_i1286) + $_size1289 = 0; + $_etype1292 = 0; + $xfer += $input->readListBegin($_etype1292, $_size1289); + for ($_i1293 = 0; $_i1293 < $_size1289; ++$_i1293) { - $elem1287 = 
null; - $xfer += $input->readString($elem1287); - $this->success []= $elem1287; + $elem1294 = null; + $xfer += $input->readString($elem1294); + $this->success []= $elem1294; } $xfer += $input->readListEnd(); } else { @@ -44484,9 +45538,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1288) + foreach ($this->success as $iter1295) { - $xfer += $output->writeString($iter1288); + $xfer += $output->writeString($iter1295); } } $output->writeListEnd(); @@ -45125,14 +46179,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1289 = 0; - $_etype1292 = 0; - $xfer += $input->readListBegin($_etype1292, $_size1289); - for ($_i1293 = 0; $_i1293 < $_size1289; ++$_i1293) + $_size1296 = 0; + $_etype1299 = 0; + $xfer += $input->readListBegin($_etype1299, $_size1296); + for ($_i1300 = 0; $_i1300 < $_size1296; ++$_i1300) { - $elem1294 = null; - $xfer += $input->readString($elem1294); - $this->success []= $elem1294; + $elem1301 = null; + $xfer += $input->readString($elem1301); + $this->success []= $elem1301; } $xfer += $input->readListEnd(); } else { @@ -45160,9 +46214,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1295) + foreach ($this->success as $iter1302) { - $xfer += $output->writeString($iter1295); + $xfer += $output->writeString($iter1302); } } $output->writeListEnd(); @@ -55701,15 +56755,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1296 = 0; - $_etype1299 = 0; - $xfer += $input->readListBegin($_etype1299, $_size1296); - for ($_i1300 = 0; $_i1300 < $_size1296; ++$_i1300) + $_size1303 = 0; + $_etype1306 = 0; + $xfer += $input->readListBegin($_etype1306, $_size1303); + for ($_i1307 = 0; $_i1307 < $_size1303; ++$_i1307) { - $elem1301 = null; - $elem1301 = new \metastore\SchemaVersion(); - $xfer += $elem1301->read($input); - $this->success []= $elem1301; + $elem1308 = null; + $elem1308 = new \metastore\SchemaVersion(); + $xfer += $elem1308->read($input); + $this->success []= $elem1308; } $xfer += $input->readListEnd(); } else { @@ -55753,9 +56807,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1302) + foreach ($this->success as $iter1309) { - $xfer += $iter1302->write($output); + $xfer += $iter1309->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 32c8cb7bc5..49f71b52ed 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -511,6 +511,10 @@ class SQLPrimaryKey { * @var bool */ public $rely_cstr = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -547,6 +551,10 @@ class SQLPrimaryKey { 'var' => 'rely_cstr', 'type' => TType::BOOL, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -574,6 +582,9 @@ class SQLPrimaryKey { if (isset($vals['rely_cstr'])) { $this->rely_cstr = $vals['rely_cstr']; } + if (isset($vals['catName'])) { + $this->catName = 
$vals['catName']; + } } } @@ -652,6 +663,13 @@ class SQLPrimaryKey { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -705,6 +723,11 @@ class SQLPrimaryKey { $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -771,6 +794,10 @@ class SQLForeignKey { * @var bool */ public $rely_cstr = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -831,6 +858,10 @@ class SQLForeignKey { 'var' => 'rely_cstr', 'type' => TType::BOOL, ), + 15 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -876,6 +907,9 @@ class SQLForeignKey { if (isset($vals['rely_cstr'])) { $this->rely_cstr = $vals['rely_cstr']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -996,6 +1030,13 @@ class SQLForeignKey { $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -1079,6 +1120,11 @@ class SQLForeignKey { $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 15); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -1092,6 +1138,10 @@ class SQLUniqueConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1126,40 +1176,47 @@ class SQLUniqueConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( + 'var' => 'column_name', + 'type' => TType::STRING, + ), + 5 => array( 'var' => 'key_seq', 'type' => TType::I32, ), - 5 => array( + 6 => array( 'var' => 'uk_name', 'type' => TType::STRING, ), - 6 => array( + 7 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 8 => array( + 9 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1208,54 +1265,61 @@ class SQLUniqueConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 
3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->column_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: if ($ftype == TType::I32) { $xfer += $input->readI32($this->key_seq); } else { $xfer += $input->skip($ftype); } break; - case 5: + case 6: if ($ftype == TType::STRING) { $xfer += $input->readString($this->uk_name); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1275,43 +1339,48 @@ class SQLUniqueConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLUniqueConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->key_seq !== null) { - $xfer += $output->writeFieldBegin('key_seq', TType::I32, 4); + $xfer += $output->writeFieldBegin('key_seq', TType::I32, 5); $xfer += $output->writeI32($this->key_seq); $xfer += $output->writeFieldEnd(); } if ($this->uk_name !== null) { - $xfer += $output->writeFieldBegin('uk_name', TType::STRING, 5); + $xfer += $output->writeFieldBegin('uk_name', TType::STRING, 6); $xfer += $output->writeString($this->uk_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1328,6 +1397,10 @@ class SQLNotNullConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ 
-1358,36 +1431,43 @@ class SQLNotNullConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( - 'var' => 'nn_name', + 'var' => 'column_name', 'type' => TType::STRING, ), 5 => array( + 'var' => 'nn_name', + 'type' => TType::STRING, + ), + 6 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 6 => array( + 7 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1433,47 +1513,54 @@ class SQLNotNullConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->nn_name); + $xfer += $input->readString($this->column_name); } else { $xfer += $input->skip($ftype); } break; case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->nn_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1493,38 +1580,43 @@ class SQLNotNullConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLNotNullConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->nn_name !== null) { - $xfer += $output->writeFieldBegin('nn_name', TType::STRING, 4); + $xfer += $output->writeFieldBegin('nn_name', TType::STRING, 5); $xfer += 
$output->writeString($this->nn_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 5); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1541,6 +1633,10 @@ class SQLDefaultConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1575,40 +1671,47 @@ class SQLDefaultConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( - 'var' => 'default_value', + 'var' => 'column_name', 'type' => TType::STRING, ), 5 => array( - 'var' => 'dc_name', + 'var' => 'default_value', 'type' => TType::STRING, ), 6 => array( + 'var' => 'dc_name', + 'type' => TType::STRING, + ), + 7 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 8 => array( + 9 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1657,54 +1760,61 @@ class SQLDefaultConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->default_value); + $xfer += $input->readString($this->column_name); } else { $xfer += $input->skip($ftype); } break; case 5: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dc_name); + $xfer += $input->readString($this->default_value); } else { $xfer += $input->skip($ftype); } break; case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dc_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ 
-1724,43 +1834,48 @@ class SQLDefaultConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLDefaultConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->default_value !== null) { - $xfer += $output->writeFieldBegin('default_value', TType::STRING, 4); + $xfer += $output->writeFieldBegin('default_value', TType::STRING, 5); $xfer += $output->writeString($this->default_value); $xfer += $output->writeFieldEnd(); } if ($this->dc_name !== null) { - $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 5); + $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 6); $xfer += $output->writeString($this->dc_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1777,6 +1892,10 @@ class SQLCheckConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1811,40 +1930,47 @@ class SQLCheckConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( - 'var' => 'check_expression', + 'var' => 'column_name', 'type' => TType::STRING, ), 5 => array( - 'var' => 'dc_name', + 'var' => 'check_expression', 'type' => TType::STRING, ), 6 => array( + 'var' => 'dc_name', + 'type' => TType::STRING, + ), + 7 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 8 => array( + 9 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { 
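
Each constraint struct keeps the loose associative-array constructor shown here, so callers pass the new catName key exactly like any existing field, and omitted keys simply stay null. Note that the pre-existing field ids shift up by one to make room for catName at id 1, which the rewritten read()/write() cases reflect. A hypothetical construction, with illustrative values only:

use metastore\SQLCheckConstraint;

// Field ids after this change: catName=1, table_db=2, table_name=3,
// column_name=4, check_expression=5, dc_name=6, enable_cstr=7,
// validate_cstr=8, rely_cstr=9.
$constraint = new SQLCheckConstraint(array(
    'catName'          => 'hive',         // new field; illustrative value
    'table_db'         => 'default',
    'table_name'       => 'orders',
    'column_name'      => 'qty',
    'check_expression' => 'qty > 0',
    'dc_name'          => 'qty_positive',
    'enable_cstr'      => true,
    'validate_cstr'    => false,
    'rely_cstr'        => false,
));
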
$this->table_db = $vals['table_db']; } @@ -1893,54 +2019,61 @@ class SQLCheckConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->check_expression); + $xfer += $input->readString($this->column_name); } else { $xfer += $input->skip($ftype); } break; case 5: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dc_name); + $xfer += $input->readString($this->check_expression); } else { $xfer += $input->skip($ftype); } break; case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dc_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1960,43 +2093,48 @@ class SQLCheckConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLCheckConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->check_expression !== null) { - $xfer += $output->writeFieldBegin('check_expression', TType::STRING, 4); + $xfer += $output->writeFieldBegin('check_expression', TType::STRING, 5); $xfer += $output->writeString($this->check_expression); $xfer += $output->writeFieldEnd(); } if ($this->dc_name !== null) { - $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 5); + $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 6); $xfer += $output->writeString($this->dc_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += 
$output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -2202,6 +2340,10 @@ class HiveObjectRef { * @var string */ public $columnName = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -2230,6 +2372,10 @@ class HiveObjectRef { 'var' => 'columnName', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -2248,6 +2394,9 @@ class HiveObjectRef { if (isset($vals['columnName'])) { $this->columnName = $vals['columnName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -2315,6 +2464,13 @@ class HiveObjectRef { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2365,6 +2521,11 @@ class HiveObjectRef { $xfer += $output->writeString($this->columnName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -4296,7 +4457,7 @@ class GrantRevokeRoleResponse { } -class Database { +class Catalog { static $_TSPEC; /** @@ -4311,22 +4472,6 @@ class Database { * @var string */ public $locationUri = null; - /** - * @var array - */ - public $parameters = null; - /** - * @var \metastore\PrincipalPrivilegeSet - */ - public $privileges = null; - /** - * @var string - */ - public $ownerName = null; - /** - * @var int - */ - public $ownerType = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4343,7 +4488,559 @@ class Database { 'var' => 'locationUri', 'type' => TType::STRING, ), - 4 => array( + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['description'])) { + $this->description = $vals['description']; + } + if (isset($vals['locationUri'])) { + $this->locationUri = $vals['locationUri']; + } + } + } + + public function getName() { + return 'Catalog'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->description); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->locationUri); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += 
$input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('Catalog'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->description !== null) { + $xfer += $output->writeFieldBegin('description', TType::STRING, 2); + $xfer += $output->writeString($this->description); + $xfer += $output->writeFieldEnd(); + } + if ($this->locationUri !== null) { + $xfer += $output->writeFieldBegin('locationUri', TType::STRING, 3); + $xfer += $output->writeString($this->locationUri); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class CreateCatalogRequest { + static $_TSPEC; + + /** + * @var \metastore\Catalog + */ + public $catalog = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'catalog', + 'type' => TType::STRUCT, + 'class' => '\metastore\Catalog', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['catalog'])) { + $this->catalog = $vals['catalog']; + } + } + } + + public function getName() { + return 'CreateCatalogRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->catalog = new \metastore\Catalog(); + $xfer += $this->catalog->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('CreateCatalogRequest'); + if ($this->catalog !== null) { + if (!is_object($this->catalog)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catalog', TType::STRUCT, 1); + $xfer += $this->catalog->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetCatalogRequest { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'GetCatalogRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('GetCatalogRequest'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetCatalogResponse { + static $_TSPEC; + + /** + * @var \metastore\Catalog + */ + public $catalog = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'catalog', + 'type' => TType::STRUCT, + 'class' => '\metastore\Catalog', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['catalog'])) { + $this->catalog = $vals['catalog']; + } + } + } + + public function getName() { + return 'GetCatalogResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->catalog = new \metastore\Catalog(); + $xfer += $this->catalog->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetCatalogResponse'); + if ($this->catalog !== null) { + if (!is_object($this->catalog)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catalog', TType::STRUCT, 1); + $xfer += $this->catalog->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetCatalogsResponse { + static $_TSPEC; + + /** + * @var string[] + */ + public $names = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'names', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['names'])) { + $this->names = $vals['names']; + } + } + } + + public function getName() { + return 'GetCatalogsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->names = array(); + $_size83 = 0; + $_etype86 = 0; + $xfer += $input->readListBegin($_etype86, $_size83); + for ($_i87 = 0; $_i87 < $_size83; ++$_i87) + { + $elem88 = null; + $xfer += $input->readString($elem88); + $this->names []= $elem88; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetCatalogsResponse'); + if ($this->names !== null) { + if (!is_array($this->names)) { + throw new TProtocolException('Bad type in structure.', 
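
The Catalog struct and the request/response wrappers around it are the client-visible surface of the new catalog support. A minimal usage sketch, assuming a connected ThriftHiveMetastoreClient in $client; the method names are inferred from the request/response types and should be treated as an assumption here:

use metastore\Catalog;
use metastore\CreateCatalogRequest;
use metastore\GetCatalogRequest;
use metastore\DropCatalogRequest;

// Create a catalog, fetch it back, list all catalog names, then drop it.
$cat = new Catalog(array(
    'name'        => 'analytics',                           // illustrative
    'description' => 'catalog for reporting databases',
    'locationUri' => 'hdfs://nn:8020/warehouse/analytics',
));
$client->create_catalog(new CreateCatalogRequest(array('catalog' => $cat)));

$resp = $client->get_catalog(new GetCatalogRequest(array('name' => 'analytics')));
echo $resp->catalog->locationUri, "\n";

foreach ($client->get_catalogs()->names as $name) {
    echo $name, "\n";
}

$client->drop_catalog(new DropCatalogRequest(array('name' => 'analytics')));
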
TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('names', TType::LST, 1); + { + $output->writeListBegin(TType::STRING, count($this->names)); + { + foreach ($this->names as $iter89) + { + $xfer += $output->writeString($iter89); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class DropCatalogRequest { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'DropCatalogRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('DropCatalogRequest'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class Database { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var string + */ + public $description = null; + /** + * @var string + */ + public $locationUri = null; + /** + * @var array + */ + public $parameters = null; + /** + * @var \metastore\PrincipalPrivilegeSet + */ + public $privileges = null; + /** + * @var string + */ + public $ownerName = null; + /** + * @var int + */ + public $ownerType = null; + /** + * @var string + */ + public $catalogName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'description', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'locationUri', + 'type' => TType::STRING, + ), + 4 => array( 'var' => 'parameters', 'type' => TType::MAP, 'ktype' => TType::STRING, @@ -4368,6 +5065,10 @@ class Database { 'var' => 'ownerType', 'type' => TType::I32, ), + 8 => array( + 'var' => 'catalogName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -4392,6 +5093,9 @@ class Database { if (isset($vals['ownerType'])) { $this->ownerType = $vals['ownerType']; } + if (isset($vals['catalogName'])) { + $this->catalogName = $vals['catalogName']; + } } } @@ -4438,17 +5142,17 @@ class Database { case 4: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size83 = 0; - $_ktype84 = 0; - $_vtype85 = 0; - $xfer += $input->readMapBegin($_ktype84, $_vtype85, $_size83); - for ($_i87 = 0; $_i87 < $_size83; ++$_i87) + $_size90 = 0; + $_ktype91 = 0; + $_vtype92 = 0; + $xfer += $input->readMapBegin($_ktype91, $_vtype92, $_size90); + for ($_i94 = 0; $_i94 < $_size90; 
++$_i94) { - $key88 = ''; - $val89 = ''; - $xfer += $input->readString($key88); - $xfer += $input->readString($val89); - $this->parameters[$key88] = $val89; + $key95 = ''; + $val96 = ''; + $xfer += $input->readString($key95); + $xfer += $input->readString($val96); + $this->parameters[$key95] = $val96; } $xfer += $input->readMapEnd(); } else { @@ -4477,6 +5181,13 @@ class Database { $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catalogName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -4513,10 +5224,10 @@ class Database { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter90 => $viter91) + foreach ($this->parameters as $kiter97 => $viter98) { - $xfer += $output->writeString($kiter90); - $xfer += $output->writeString($viter91); + $xfer += $output->writeString($kiter97); + $xfer += $output->writeString($viter98); } } $output->writeMapEnd(); @@ -4541,6 +5252,11 @@ class Database { $xfer += $output->writeI32($this->ownerType); $xfer += $output->writeFieldEnd(); } + if ($this->catalogName !== null) { + $xfer += $output->writeFieldBegin('catalogName', TType::STRING, 8); + $xfer += $output->writeString($this->catalogName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -4682,17 +5398,17 @@ class SerDeInfo { case 3: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size92 = 0; - $_ktype93 = 0; - $_vtype94 = 0; - $xfer += $input->readMapBegin($_ktype93, $_vtype94, $_size92); - for ($_i96 = 0; $_i96 < $_size92; ++$_i96) + $_size99 = 0; + $_ktype100 = 0; + $_vtype101 = 0; + $xfer += $input->readMapBegin($_ktype100, $_vtype101, $_size99); + for ($_i103 = 0; $_i103 < $_size99; ++$_i103) { - $key97 = ''; - $val98 = ''; - $xfer += $input->readString($key97); - $xfer += $input->readString($val98); - $this->parameters[$key97] = $val98; + $key104 = ''; + $val105 = ''; + $xfer += $input->readString($key104); + $xfer += $input->readString($val105); + $this->parameters[$key104] = $val105; } $xfer += $input->readMapEnd(); } else { @@ -4758,10 +5474,10 @@ class SerDeInfo { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter99 => $viter100) + foreach ($this->parameters as $kiter106 => $viter107) { - $xfer += $output->writeString($kiter99); - $xfer += $output->writeString($viter100); + $xfer += $output->writeString($kiter106); + $xfer += $output->writeString($viter107); } } $output->writeMapEnd(); @@ -4985,14 +5701,14 @@ class SkewedInfo { case 1: if ($ftype == TType::LST) { $this->skewedColNames = array(); - $_size101 = 0; - $_etype104 = 0; - $xfer += $input->readListBegin($_etype104, $_size101); - for ($_i105 = 0; $_i105 < $_size101; ++$_i105) + $_size108 = 0; + $_etype111 = 0; + $xfer += $input->readListBegin($_etype111, $_size108); + for ($_i112 = 0; $_i112 < $_size108; ++$_i112) { - $elem106 = null; - $xfer += $input->readString($elem106); - $this->skewedColNames []= $elem106; + $elem113 = null; + $xfer += $input->readString($elem113); + $this->skewedColNames []= $elem113; } $xfer += $input->readListEnd(); } else { @@ -5002,24 +5718,24 @@ class SkewedInfo { case 2: if ($ftype == TType::LST) { $this->skewedColValues = array(); - $_size107 = 0; - $_etype110 = 0; - $xfer += $input->readListBegin($_etype110, $_size107); - for ($_i111 = 0; $_i111 < 
$_size107; ++$_i111) + $_size114 = 0; + $_etype117 = 0; + $xfer += $input->readListBegin($_etype117, $_size114); + for ($_i118 = 0; $_i118 < $_size114; ++$_i118) { - $elem112 = null; - $elem112 = array(); - $_size113 = 0; - $_etype116 = 0; - $xfer += $input->readListBegin($_etype116, $_size113); - for ($_i117 = 0; $_i117 < $_size113; ++$_i117) + $elem119 = null; + $elem119 = array(); + $_size120 = 0; + $_etype123 = 0; + $xfer += $input->readListBegin($_etype123, $_size120); + for ($_i124 = 0; $_i124 < $_size120; ++$_i124) { - $elem118 = null; - $xfer += $input->readString($elem118); - $elem112 []= $elem118; + $elem125 = null; + $xfer += $input->readString($elem125); + $elem119 []= $elem125; } $xfer += $input->readListEnd(); - $this->skewedColValues []= $elem112; + $this->skewedColValues []= $elem119; } $xfer += $input->readListEnd(); } else { @@ -5029,27 +5745,27 @@ class SkewedInfo { case 3: if ($ftype == TType::MAP) { $this->skewedColValueLocationMaps = array(); - $_size119 = 0; - $_ktype120 = 0; - $_vtype121 = 0; - $xfer += $input->readMapBegin($_ktype120, $_vtype121, $_size119); - for ($_i123 = 0; $_i123 < $_size119; ++$_i123) + $_size126 = 0; + $_ktype127 = 0; + $_vtype128 = 0; + $xfer += $input->readMapBegin($_ktype127, $_vtype128, $_size126); + for ($_i130 = 0; $_i130 < $_size126; ++$_i130) { - $key124 = array(); - $val125 = ''; - $key124 = array(); - $_size126 = 0; - $_etype129 = 0; - $xfer += $input->readListBegin($_etype129, $_size126); - for ($_i130 = 0; $_i130 < $_size126; ++$_i130) + $key131 = array(); + $val132 = ''; + $key131 = array(); + $_size133 = 0; + $_etype136 = 0; + $xfer += $input->readListBegin($_etype136, $_size133); + for ($_i137 = 0; $_i137 < $_size133; ++$_i137) { - $elem131 = null; - $xfer += $input->readString($elem131); - $key124 []= $elem131; + $elem138 = null; + $xfer += $input->readString($elem138); + $key131 []= $elem138; } $xfer += $input->readListEnd(); - $xfer += $input->readString($val125); - $this->skewedColValueLocationMaps[$key124] = $val125; + $xfer += $input->readString($val132); + $this->skewedColValueLocationMaps[$key131] = $val132; } $xfer += $input->readMapEnd(); } else { @@ -5077,9 +5793,9 @@ class SkewedInfo { { $output->writeListBegin(TType::STRING, count($this->skewedColNames)); { - foreach ($this->skewedColNames as $iter132) + foreach ($this->skewedColNames as $iter139) { - $xfer += $output->writeString($iter132); + $xfer += $output->writeString($iter139); } } $output->writeListEnd(); @@ -5094,14 +5810,14 @@ class SkewedInfo { { $output->writeListBegin(TType::LST, count($this->skewedColValues)); { - foreach ($this->skewedColValues as $iter133) + foreach ($this->skewedColValues as $iter140) { { - $output->writeListBegin(TType::STRING, count($iter133)); + $output->writeListBegin(TType::STRING, count($iter140)); { - foreach ($iter133 as $iter134) + foreach ($iter140 as $iter141) { - $xfer += $output->writeString($iter134); + $xfer += $output->writeString($iter141); } } $output->writeListEnd(); @@ -5120,19 +5836,19 @@ class SkewedInfo { { $output->writeMapBegin(TType::LST, TType::STRING, count($this->skewedColValueLocationMaps)); { - foreach ($this->skewedColValueLocationMaps as $kiter135 => $viter136) + foreach ($this->skewedColValueLocationMaps as $kiter142 => $viter143) { { - $output->writeListBegin(TType::STRING, count($kiter135)); + $output->writeListBegin(TType::STRING, count($kiter142)); { - foreach ($kiter135 as $iter137) + foreach ($kiter142 as $iter144) { - $xfer += $output->writeString($iter137); + $xfer += 
$output->writeString($iter144); } } $output->writeListEnd(); } - $xfer += $output->writeString($viter136); + $xfer += $output->writeString($viter143); } } $output->writeMapEnd(); @@ -5337,15 +6053,15 @@ class StorageDescriptor { case 1: if ($ftype == TType::LST) { $this->cols = array(); - $_size138 = 0; - $_etype141 = 0; - $xfer += $input->readListBegin($_etype141, $_size138); - for ($_i142 = 0; $_i142 < $_size138; ++$_i142) + $_size145 = 0; + $_etype148 = 0; + $xfer += $input->readListBegin($_etype148, $_size145); + for ($_i149 = 0; $_i149 < $_size145; ++$_i149) { - $elem143 = null; - $elem143 = new \metastore\FieldSchema(); - $xfer += $elem143->read($input); - $this->cols []= $elem143; + $elem150 = null; + $elem150 = new \metastore\FieldSchema(); + $xfer += $elem150->read($input); + $this->cols []= $elem150; } $xfer += $input->readListEnd(); } else { @@ -5398,14 +6114,14 @@ class StorageDescriptor { case 8: if ($ftype == TType::LST) { $this->bucketCols = array(); - $_size144 = 0; - $_etype147 = 0; - $xfer += $input->readListBegin($_etype147, $_size144); - for ($_i148 = 0; $_i148 < $_size144; ++$_i148) + $_size151 = 0; + $_etype154 = 0; + $xfer += $input->readListBegin($_etype154, $_size151); + for ($_i155 = 0; $_i155 < $_size151; ++$_i155) { - $elem149 = null; - $xfer += $input->readString($elem149); - $this->bucketCols []= $elem149; + $elem156 = null; + $xfer += $input->readString($elem156); + $this->bucketCols []= $elem156; } $xfer += $input->readListEnd(); } else { @@ -5415,15 +6131,15 @@ class StorageDescriptor { case 9: if ($ftype == TType::LST) { $this->sortCols = array(); - $_size150 = 0; - $_etype153 = 0; - $xfer += $input->readListBegin($_etype153, $_size150); - for ($_i154 = 0; $_i154 < $_size150; ++$_i154) + $_size157 = 0; + $_etype160 = 0; + $xfer += $input->readListBegin($_etype160, $_size157); + for ($_i161 = 0; $_i161 < $_size157; ++$_i161) { - $elem155 = null; - $elem155 = new \metastore\Order(); - $xfer += $elem155->read($input); - $this->sortCols []= $elem155; + $elem162 = null; + $elem162 = new \metastore\Order(); + $xfer += $elem162->read($input); + $this->sortCols []= $elem162; } $xfer += $input->readListEnd(); } else { @@ -5433,17 +6149,17 @@ class StorageDescriptor { case 10: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size156 = 0; - $_ktype157 = 0; - $_vtype158 = 0; - $xfer += $input->readMapBegin($_ktype157, $_vtype158, $_size156); - for ($_i160 = 0; $_i160 < $_size156; ++$_i160) + $_size163 = 0; + $_ktype164 = 0; + $_vtype165 = 0; + $xfer += $input->readMapBegin($_ktype164, $_vtype165, $_size163); + for ($_i167 = 0; $_i167 < $_size163; ++$_i167) { - $key161 = ''; - $val162 = ''; - $xfer += $input->readString($key161); - $xfer += $input->readString($val162); - $this->parameters[$key161] = $val162; + $key168 = ''; + $val169 = ''; + $xfer += $input->readString($key168); + $xfer += $input->readString($val169); + $this->parameters[$key168] = $val169; } $xfer += $input->readMapEnd(); } else { @@ -5486,9 +6202,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter163) + foreach ($this->cols as $iter170) { - $xfer += $iter163->write($output); + $xfer += $iter170->write($output); } } $output->writeListEnd(); @@ -5536,9 +6252,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRING, count($this->bucketCols)); { - foreach ($this->bucketCols as $iter164) + foreach ($this->bucketCols as $iter171) { - $xfer += $output->writeString($iter164); + $xfer += 
$output->writeString($iter171); } } $output->writeListEnd(); @@ -5553,9 +6269,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRUCT, count($this->sortCols)); { - foreach ($this->sortCols as $iter165) + foreach ($this->sortCols as $iter172) { - $xfer += $iter165->write($output); + $xfer += $iter172->write($output); } } $output->writeListEnd(); @@ -5570,10 +6286,10 @@ class StorageDescriptor { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter166 => $viter167) + foreach ($this->parameters as $kiter173 => $viter174) { - $xfer += $output->writeString($kiter166); - $xfer += $output->writeString($viter167); + $xfer += $output->writeString($kiter173); + $xfer += $output->writeString($viter174); } } $output->writeMapEnd(); @@ -5667,6 +6383,10 @@ class Table { * @var \metastore\CreationMetadata */ public $creationMetadata = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -5751,6 +6471,10 @@ class Table { 'type' => TType::STRUCT, 'class' => '\metastore\CreationMetadata', ), + 17 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -5802,6 +6526,9 @@ class Table { if (isset($vals['creationMetadata'])) { $this->creationMetadata = $vals['creationMetadata']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -5877,15 +6604,15 @@ class Table { case 8: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size168 = 0; - $_etype171 = 0; - $xfer += $input->readListBegin($_etype171, $_size168); - for ($_i172 = 0; $_i172 < $_size168; ++$_i172) + $_size175 = 0; + $_etype178 = 0; + $xfer += $input->readListBegin($_etype178, $_size175); + for ($_i179 = 0; $_i179 < $_size175; ++$_i179) { - $elem173 = null; - $elem173 = new \metastore\FieldSchema(); - $xfer += $elem173->read($input); - $this->partitionKeys []= $elem173; + $elem180 = null; + $elem180 = new \metastore\FieldSchema(); + $xfer += $elem180->read($input); + $this->partitionKeys []= $elem180; } $xfer += $input->readListEnd(); } else { @@ -5895,17 +6622,17 @@ class Table { case 9: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size174 = 0; - $_ktype175 = 0; - $_vtype176 = 0; - $xfer += $input->readMapBegin($_ktype175, $_vtype176, $_size174); - for ($_i178 = 0; $_i178 < $_size174; ++$_i178) + $_size181 = 0; + $_ktype182 = 0; + $_vtype183 = 0; + $xfer += $input->readMapBegin($_ktype182, $_vtype183, $_size181); + for ($_i185 = 0; $_i185 < $_size181; ++$_i185) { - $key179 = ''; - $val180 = ''; - $xfer += $input->readString($key179); - $xfer += $input->readString($val180); - $this->parameters[$key179] = $val180; + $key186 = ''; + $val187 = ''; + $xfer += $input->readString($key186); + $xfer += $input->readString($val187); + $this->parameters[$key186] = $val187; } $xfer += $input->readMapEnd(); } else { @@ -5963,6 +6690,13 @@ class Table { $xfer += $input->skip($ftype); } break; + case 17: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -6022,9 +6756,9 @@ class Table { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter181) + foreach ($this->partitionKeys as $iter188) { - $xfer += $iter181->write($output); + $xfer += $iter188->write($output); } } $output->writeListEnd(); @@ -6039,10 +6773,10 @@ 
class Table { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter182 => $viter183) + foreach ($this->parameters as $kiter189 => $viter190) { - $xfer += $output->writeString($kiter182); - $xfer += $output->writeString($viter183); + $xfer += $output->writeString($kiter189); + $xfer += $output->writeString($viter190); } } $output->writeMapEnd(); @@ -6090,6 +6824,11 @@ class Table { $xfer += $this->creationMetadata->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 17); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -6132,6 +6871,10 @@ class Partition { * @var \metastore\PrincipalPrivilegeSet */ public $privileges = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -6182,6 +6925,10 @@ class Partition { 'type' => TType::STRUCT, 'class' => '\metastore\PrincipalPrivilegeSet', ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -6209,6 +6956,9 @@ class Partition { if (isset($vals['privileges'])) { $this->privileges = $vals['privileges']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -6234,14 +6984,14 @@ class Partition { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size184 = 0; - $_etype187 = 0; - $xfer += $input->readListBegin($_etype187, $_size184); - for ($_i188 = 0; $_i188 < $_size184; ++$_i188) + $_size191 = 0; + $_etype194 = 0; + $xfer += $input->readListBegin($_etype194, $_size191); + for ($_i195 = 0; $_i195 < $_size191; ++$_i195) { - $elem189 = null; - $xfer += $input->readString($elem189); - $this->values []= $elem189; + $elem196 = null; + $xfer += $input->readString($elem196); + $this->values []= $elem196; } $xfer += $input->readListEnd(); } else { @@ -6287,17 +7037,17 @@ class Partition { case 7: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size190 = 0; - $_ktype191 = 0; - $_vtype192 = 0; - $xfer += $input->readMapBegin($_ktype191, $_vtype192, $_size190); - for ($_i194 = 0; $_i194 < $_size190; ++$_i194) + $_size197 = 0; + $_ktype198 = 0; + $_vtype199 = 0; + $xfer += $input->readMapBegin($_ktype198, $_vtype199, $_size197); + for ($_i201 = 0; $_i201 < $_size197; ++$_i201) { - $key195 = ''; - $val196 = ''; - $xfer += $input->readString($key195); - $xfer += $input->readString($val196); - $this->parameters[$key195] = $val196; + $key202 = ''; + $val203 = ''; + $xfer += $input->readString($key202); + $xfer += $input->readString($val203); + $this->parameters[$key202] = $val203; } $xfer += $input->readMapEnd(); } else { @@ -6312,6 +7062,13 @@ class Partition { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -6333,9 +7090,9 @@ class Partition { { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter197) + foreach ($this->values as $iter204) { - $xfer += $output->writeString($iter197); + $xfer += $output->writeString($iter204); } } $output->writeListEnd(); @@ -6378,10 +7135,10 @@ class Partition { { $output->writeMapBegin(TType::STRING, TType::STRING, 
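
Table follows the same append-only pattern: catName is a new optional string at field id 17, placed after creationMetadata (16), and Partition does likewise with field id 9. A pre-patch reader that encounters the new field falls into the default case and calls $input->skip($ftype), so old clients tolerate catalog-aware servers. On the consuming side, the Java half of this patch substitutes Warehouse.DEFAULT_CATALOG_NAME ("hive") when the field is unset; a PHP equivalent would be a one-line helper (the helper name here is illustrative, not part of the generated code):

    <?php
    // Resolve a Table's catalog, defaulting like the Java-side
    // "isSetCatName() ? getCatName() : DEFAULT_CATALOG_NAME" pattern.
    function catNameOrDefault(\metastore\Table $t) {
      return $t->catName !== null ? $t->catName : 'hive';
    }
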
count($this->parameters)); { - foreach ($this->parameters as $kiter198 => $viter199) + foreach ($this->parameters as $kiter205 => $viter206) { - $xfer += $output->writeString($kiter198); - $xfer += $output->writeString($viter199); + $xfer += $output->writeString($kiter205); + $xfer += $output->writeString($viter206); } } $output->writeMapEnd(); @@ -6396,6 +7153,11 @@ class Partition { $xfer += $this->privileges->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -6517,14 +7279,14 @@ class PartitionWithoutSD { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size200 = 0; - $_etype203 = 0; - $xfer += $input->readListBegin($_etype203, $_size200); - for ($_i204 = 0; $_i204 < $_size200; ++$_i204) + $_size207 = 0; + $_etype210 = 0; + $xfer += $input->readListBegin($_etype210, $_size207); + for ($_i211 = 0; $_i211 < $_size207; ++$_i211) { - $elem205 = null; - $xfer += $input->readString($elem205); - $this->values []= $elem205; + $elem212 = null; + $xfer += $input->readString($elem212); + $this->values []= $elem212; } $xfer += $input->readListEnd(); } else { @@ -6555,17 +7317,17 @@ class PartitionWithoutSD { case 5: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size206 = 0; - $_ktype207 = 0; - $_vtype208 = 0; - $xfer += $input->readMapBegin($_ktype207, $_vtype208, $_size206); - for ($_i210 = 0; $_i210 < $_size206; ++$_i210) + $_size213 = 0; + $_ktype214 = 0; + $_vtype215 = 0; + $xfer += $input->readMapBegin($_ktype214, $_vtype215, $_size213); + for ($_i217 = 0; $_i217 < $_size213; ++$_i217) { - $key211 = ''; - $val212 = ''; - $xfer += $input->readString($key211); - $xfer += $input->readString($val212); - $this->parameters[$key211] = $val212; + $key218 = ''; + $val219 = ''; + $xfer += $input->readString($key218); + $xfer += $input->readString($val219); + $this->parameters[$key218] = $val219; } $xfer += $input->readMapEnd(); } else { @@ -6601,9 +7363,9 @@ class PartitionWithoutSD { { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter213) + foreach ($this->values as $iter220) { - $xfer += $output->writeString($iter213); + $xfer += $output->writeString($iter220); } } $output->writeListEnd(); @@ -6633,10 +7395,10 @@ class PartitionWithoutSD { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter214 => $viter215) + foreach ($this->parameters as $kiter221 => $viter222) { - $xfer += $output->writeString($kiter214); - $xfer += $output->writeString($viter215); + $xfer += $output->writeString($kiter221); + $xfer += $output->writeString($viter222); } } $output->writeMapEnd(); @@ -6721,15 +7483,15 @@ class PartitionSpecWithSharedSD { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size216 = 0; - $_etype219 = 0; - $xfer += $input->readListBegin($_etype219, $_size216); - for ($_i220 = 0; $_i220 < $_size216; ++$_i220) + $_size223 = 0; + $_etype226 = 0; + $xfer += $input->readListBegin($_etype226, $_size223); + for ($_i227 = 0; $_i227 < $_size223; ++$_i227) { - $elem221 = null; - $elem221 = new \metastore\PartitionWithoutSD(); - $xfer += $elem221->read($input); - $this->partitions []= $elem221; + $elem228 = null; + $elem228 = new \metastore\PartitionWithoutSD(); + $xfer += 
$elem228->read($input); + $this->partitions []= $elem228; } $xfer += $input->readListEnd(); } else { @@ -6765,9 +7527,9 @@ class PartitionSpecWithSharedSD { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter222) + foreach ($this->partitions as $iter229) { - $xfer += $iter222->write($output); + $xfer += $iter229->write($output); } } $output->writeListEnd(); @@ -6840,15 +7602,15 @@ class PartitionListComposingSpec { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size223 = 0; - $_etype226 = 0; - $xfer += $input->readListBegin($_etype226, $_size223); - for ($_i227 = 0; $_i227 < $_size223; ++$_i227) + $_size230 = 0; + $_etype233 = 0; + $xfer += $input->readListBegin($_etype233, $_size230); + for ($_i234 = 0; $_i234 < $_size230; ++$_i234) { - $elem228 = null; - $elem228 = new \metastore\Partition(); - $xfer += $elem228->read($input); - $this->partitions []= $elem228; + $elem235 = null; + $elem235 = new \metastore\Partition(); + $xfer += $elem235->read($input); + $this->partitions []= $elem235; } $xfer += $input->readListEnd(); } else { @@ -6876,9 +7638,9 @@ class PartitionListComposingSpec { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter229) + foreach ($this->partitions as $iter236) { - $xfer += $iter229->write($output); + $xfer += $iter236->write($output); } } $output->writeListEnd(); @@ -6915,6 +7677,10 @@ class PartitionSpec { * @var \metastore\PartitionListComposingSpec */ public $partitionList = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -6941,6 +7707,10 @@ class PartitionSpec { 'type' => TType::STRUCT, 'class' => '\metastore\PartitionListComposingSpec', ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -6959,6 +7729,9 @@ class PartitionSpec { if (isset($vals['partitionList'])) { $this->partitionList = $vals['partitionList']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -7018,6 +7791,13 @@ class PartitionSpec { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -7062,6 +7842,11 @@ class PartitionSpec { $xfer += $this->partitionList->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -8782,6 +9567,10 @@ class ColumnStatisticsDesc { * @var int */ public $lastAnalyzed = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -8806,6 +9595,10 @@ class ColumnStatisticsDesc { 'var' => 'lastAnalyzed', 'type' => TType::I64, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -8824,6 +9617,9 @@ class ColumnStatisticsDesc { if (isset($vals['lastAnalyzed'])) { $this->lastAnalyzed = $vals['lastAnalyzed']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -8881,6 +9677,13 @@ class ColumnStatisticsDesc { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == 
TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -8919,6 +9722,11 @@ class ColumnStatisticsDesc { $xfer += $output->writeI64($this->lastAnalyzed); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -8994,18 +9802,18 @@ class ColumnStatistics { $xfer += $input->skip($ftype); } break; - case 2: - if ($ftype == TType::LST) { - $this->statsObj = array(); - $_size230 = 0; - $_etype233 = 0; - $xfer += $input->readListBegin($_etype233, $_size230); - for ($_i234 = 0; $_i234 < $_size230; ++$_i234) + case 2: + if ($ftype == TType::LST) { + $this->statsObj = array(); + $_size237 = 0; + $_etype240 = 0; + $xfer += $input->readListBegin($_etype240, $_size237); + for ($_i241 = 0; $_i241 < $_size237; ++$_i241) { - $elem235 = null; - $elem235 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem235->read($input); - $this->statsObj []= $elem235; + $elem242 = null; + $elem242 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem242->read($input); + $this->statsObj []= $elem242; } $xfer += $input->readListEnd(); } else { @@ -9041,9 +9849,9 @@ class ColumnStatistics { { $output->writeListBegin(TType::STRUCT, count($this->statsObj)); { - foreach ($this->statsObj as $iter236) + foreach ($this->statsObj as $iter243) { - $xfer += $iter236->write($output); + $xfer += $iter243->write($output); } } $output->writeListEnd(); @@ -9119,15 +9927,15 @@ class AggrStats { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size237 = 0; - $_etype240 = 0; - $xfer += $input->readListBegin($_etype240, $_size237); - for ($_i241 = 0; $_i241 < $_size237; ++$_i241) + $_size244 = 0; + $_etype247 = 0; + $xfer += $input->readListBegin($_etype247, $_size244); + for ($_i248 = 0; $_i248 < $_size244; ++$_i248) { - $elem242 = null; - $elem242 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem242->read($input); - $this->colStats []= $elem242; + $elem249 = null; + $elem249 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem249->read($input); + $this->colStats []= $elem249; } $xfer += $input->readListEnd(); } else { @@ -9162,9 +9970,9 @@ class AggrStats { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - foreach ($this->colStats as $iter243) + foreach ($this->colStats as $iter250) { - $xfer += $iter243->write($output); + $xfer += $iter250->write($output); } } $output->writeListEnd(); @@ -9245,15 +10053,15 @@ class SetPartitionsStatsRequest { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size244 = 0; - $_etype247 = 0; - $xfer += $input->readListBegin($_etype247, $_size244); - for ($_i248 = 0; $_i248 < $_size244; ++$_i248) + $_size251 = 0; + $_etype254 = 0; + $xfer += $input->readListBegin($_etype254, $_size251); + for ($_i255 = 0; $_i255 < $_size251; ++$_i255) { - $elem249 = null; - $elem249 = new \metastore\ColumnStatistics(); - $xfer += $elem249->read($input); - $this->colStats []= $elem249; + $elem256 = null; + $elem256 = new \metastore\ColumnStatistics(); + $xfer += $elem256->read($input); + $this->colStats []= $elem256; } $xfer += $input->readListEnd(); } else { @@ -9288,9 +10096,9 @@ class SetPartitionsStatsRequest { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - 
foreach ($this->colStats as $iter250) + foreach ($this->colStats as $iter257) { - $xfer += $iter250->write($output); + $xfer += $iter257->write($output); } } $output->writeListEnd(); @@ -9379,15 +10187,15 @@ class Schema { case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size251 = 0; - $_etype254 = 0; - $xfer += $input->readListBegin($_etype254, $_size251); - for ($_i255 = 0; $_i255 < $_size251; ++$_i255) + $_size258 = 0; + $_etype261 = 0; + $xfer += $input->readListBegin($_etype261, $_size258); + for ($_i262 = 0; $_i262 < $_size258; ++$_i262) { - $elem256 = null; - $elem256 = new \metastore\FieldSchema(); - $xfer += $elem256->read($input); - $this->fieldSchemas []= $elem256; + $elem263 = null; + $elem263 = new \metastore\FieldSchema(); + $xfer += $elem263->read($input); + $this->fieldSchemas []= $elem263; } $xfer += $input->readListEnd(); } else { @@ -9397,17 +10205,17 @@ class Schema { case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size257 = 0; - $_ktype258 = 0; - $_vtype259 = 0; - $xfer += $input->readMapBegin($_ktype258, $_vtype259, $_size257); - for ($_i261 = 0; $_i261 < $_size257; ++$_i261) + $_size264 = 0; + $_ktype265 = 0; + $_vtype266 = 0; + $xfer += $input->readMapBegin($_ktype265, $_vtype266, $_size264); + for ($_i268 = 0; $_i268 < $_size264; ++$_i268) { - $key262 = ''; - $val263 = ''; - $xfer += $input->readString($key262); - $xfer += $input->readString($val263); - $this->properties[$key262] = $val263; + $key269 = ''; + $val270 = ''; + $xfer += $input->readString($key269); + $xfer += $input->readString($val270); + $this->properties[$key269] = $val270; } $xfer += $input->readMapEnd(); } else { @@ -9435,9 +10243,9 @@ class Schema { { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter264) + foreach ($this->fieldSchemas as $iter271) { - $xfer += $iter264->write($output); + $xfer += $iter271->write($output); } } $output->writeListEnd(); @@ -9452,10 +10260,10 @@ class Schema { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter265 => $viter266) + foreach ($this->properties as $kiter272 => $viter273) { - $xfer += $output->writeString($kiter265); - $xfer += $output->writeString($viter266); + $xfer += $output->writeString($kiter272); + $xfer += $output->writeString($viter273); } } $output->writeMapEnd(); @@ -9523,17 +10331,17 @@ class EnvironmentContext { case 1: if ($ftype == TType::MAP) { $this->properties = array(); - $_size267 = 0; - $_ktype268 = 0; - $_vtype269 = 0; - $xfer += $input->readMapBegin($_ktype268, $_vtype269, $_size267); - for ($_i271 = 0; $_i271 < $_size267; ++$_i271) + $_size274 = 0; + $_ktype275 = 0; + $_vtype276 = 0; + $xfer += $input->readMapBegin($_ktype275, $_vtype276, $_size274); + for ($_i278 = 0; $_i278 < $_size274; ++$_i278) { - $key272 = ''; - $val273 = ''; - $xfer += $input->readString($key272); - $xfer += $input->readString($val273); - $this->properties[$key272] = $val273; + $key279 = ''; + $val280 = ''; + $xfer += $input->readString($key279); + $xfer += $input->readString($val280); + $this->properties[$key279] = $val280; } $xfer += $input->readMapEnd(); } else { @@ -9561,10 +10369,10 @@ class EnvironmentContext { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter274 => $viter275) + foreach ($this->properties as $kiter281 => $viter282) { - $xfer += $output->writeString($kiter274); - $xfer += 
$output->writeString($viter275); + $xfer += $output->writeString($kiter281); + $xfer += $output->writeString($viter282); } } $output->writeMapEnd(); @@ -9589,6 +10397,10 @@ class PrimaryKeysRequest { * @var string */ public $tbl_name = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9601,6 +10413,10 @@ class PrimaryKeysRequest { 'var' => 'tbl_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -9610,6 +10426,9 @@ class PrimaryKeysRequest { if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -9646,6 +10465,13 @@ class PrimaryKeysRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -9669,6 +10495,11 @@ class PrimaryKeysRequest { $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9727,15 +10558,15 @@ class PrimaryKeysResponse { case 1: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size276 = 0; - $_etype279 = 0; - $xfer += $input->readListBegin($_etype279, $_size276); - for ($_i280 = 0; $_i280 < $_size276; ++$_i280) + $_size283 = 0; + $_etype286 = 0; + $xfer += $input->readListBegin($_etype286, $_size283); + for ($_i287 = 0; $_i287 < $_size283; ++$_i287) { - $elem281 = null; - $elem281 = new \metastore\SQLPrimaryKey(); - $xfer += $elem281->read($input); - $this->primaryKeys []= $elem281; + $elem288 = null; + $elem288 = new \metastore\SQLPrimaryKey(); + $xfer += $elem288->read($input); + $this->primaryKeys []= $elem288; } $xfer += $input->readListEnd(); } else { @@ -9763,9 +10594,9 @@ class PrimaryKeysResponse { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter282) + foreach ($this->primaryKeys as $iter289) { - $xfer += $iter282->write($output); + $xfer += $iter289->write($output); } } $output->writeListEnd(); @@ -9798,6 +10629,10 @@ class ForeignKeysRequest { * @var string */ public $foreign_tbl_name = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9818,6 +10653,10 @@ class ForeignKeysRequest { 'var' => 'foreign_tbl_name', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -9833,6 +10672,9 @@ class ForeignKeysRequest { if (isset($vals['foreign_tbl_name'])) { $this->foreign_tbl_name = $vals['foreign_tbl_name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -9883,6 +10725,13 @@ class ForeignKeysRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -9916,6 +10765,11 @@ class ForeignKeysRequest { $xfer += $output->writeString($this->foreign_tbl_name); $xfer += 
$output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9974,15 +10828,15 @@ class ForeignKeysResponse { case 1: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size283 = 0; - $_etype286 = 0; - $xfer += $input->readListBegin($_etype286, $_size283); - for ($_i287 = 0; $_i287 < $_size283; ++$_i287) + $_size290 = 0; + $_etype293 = 0; + $xfer += $input->readListBegin($_etype293, $_size290); + for ($_i294 = 0; $_i294 < $_size290; ++$_i294) { - $elem288 = null; - $elem288 = new \metastore\SQLForeignKey(); - $xfer += $elem288->read($input); - $this->foreignKeys []= $elem288; + $elem295 = null; + $elem295 = new \metastore\SQLForeignKey(); + $xfer += $elem295->read($input); + $this->foreignKeys []= $elem295; } $xfer += $input->readListEnd(); } else { @@ -10010,9 +10864,9 @@ class ForeignKeysResponse { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter289) + foreach ($this->foreignKeys as $iter296) { - $xfer += $iter289->write($output); + $xfer += $iter296->write($output); } } $output->writeListEnd(); @@ -10032,6 +10886,10 @@ class UniqueConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10042,16 +10900,23 @@ class UniqueConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -10082,13 +10947,20 @@ class UniqueConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10107,13 +10979,18 @@ class UniqueConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('UniqueConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10175,15 +11052,15 @@ class UniqueConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size290 = 0; - $_etype293 = 0; - $xfer += 
$input->readListBegin($_etype293, $_size290); - for ($_i294 = 0; $_i294 < $_size290; ++$_i294) + $_size297 = 0; + $_etype300 = 0; + $xfer += $input->readListBegin($_etype300, $_size297); + for ($_i301 = 0; $_i301 < $_size297; ++$_i301) { - $elem295 = null; - $elem295 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem295->read($input); - $this->uniqueConstraints []= $elem295; + $elem302 = null; + $elem302 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem302->read($input); + $this->uniqueConstraints []= $elem302; } $xfer += $input->readListEnd(); } else { @@ -10211,9 +11088,9 @@ class UniqueConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter296) + foreach ($this->uniqueConstraints as $iter303) { - $xfer += $iter296->write($output); + $xfer += $iter303->write($output); } } $output->writeListEnd(); @@ -10233,6 +11110,10 @@ class NotNullConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10243,16 +11124,23 @@ class NotNullConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -10283,13 +11171,20 @@ class NotNullConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10308,13 +11203,18 @@ class NotNullConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('NotNullConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10376,15 +11276,15 @@ class NotNullConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size297 = 0; - $_etype300 = 0; - $xfer += $input->readListBegin($_etype300, $_size297); - for ($_i301 = 0; $_i301 < $_size297; ++$_i301) + $_size304 = 0; + $_etype307 = 0; + $xfer += $input->readListBegin($_etype307, $_size304); + for ($_i308 = 0; $_i308 < $_size304; ++$_i308) { - $elem302 = null; - $elem302 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem302->read($input); - $this->notNullConstraints []= $elem302; + $elem309 = null; + $elem309 = new 
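
Note the different strategy in the constraint request structs: UniqueConstraintsRequest and NotNullConstraintsRequest (and, below, DefaultConstraintsRequest and CheckConstraintsRequest) do not append catName; they give it field id 1 and renumber db_name and tbl_name to 2 and 3. Unlike the Database/Table/Partition changes, this renumbering is not wire-compatible with pre-catalog clients for these calls. Construction is unaffected, since the $vals array is keyed by name:

    <?php
    // catName now occupies field id 1; db_name and tbl_name shifted to 2 and 3.
    $req = new \metastore\UniqueConstraintsRequest(array(
      'catName'  => 'hive',
      'db_name'  => 'sales',
      'tbl_name' => 'orders',
    ));
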
\metastore\SQLNotNullConstraint(); + $xfer += $elem309->read($input); + $this->notNullConstraints []= $elem309; } $xfer += $input->readListEnd(); } else { @@ -10412,9 +11312,9 @@ class NotNullConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter303) + foreach ($this->notNullConstraints as $iter310) { - $xfer += $iter303->write($output); + $xfer += $iter310->write($output); } } $output->writeListEnd(); @@ -10434,6 +11334,10 @@ class DefaultConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10444,16 +11348,23 @@ class DefaultConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -10484,13 +11395,20 @@ class DefaultConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10509,13 +11427,18 @@ class DefaultConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('DefaultConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10577,15 +11500,15 @@ class DefaultConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size304 = 0; - $_etype307 = 0; - $xfer += $input->readListBegin($_etype307, $_size304); - for ($_i308 = 0; $_i308 < $_size304; ++$_i308) + $_size311 = 0; + $_etype314 = 0; + $xfer += $input->readListBegin($_etype314, $_size311); + for ($_i315 = 0; $_i315 < $_size311; ++$_i315) { - $elem309 = null; - $elem309 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem309->read($input); - $this->defaultConstraints []= $elem309; + $elem316 = null; + $elem316 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem316->read($input); + $this->defaultConstraints []= $elem316; } $xfer += $input->readListEnd(); } else { @@ -10613,9 +11536,9 @@ class DefaultConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter310) + foreach ($this->defaultConstraints as $iter317) { - $xfer += 
$iter310->write($output); + $xfer += $iter317->write($output); } } $output->writeListEnd(); @@ -10635,6 +11558,10 @@ class CheckConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10645,16 +11572,23 @@ class CheckConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -10685,13 +11619,20 @@ class CheckConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10710,13 +11651,18 @@ class CheckConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('CheckConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10778,15 +11724,15 @@ class CheckConstraintsResponse { case 1: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size311 = 0; - $_etype314 = 0; - $xfer += $input->readListBegin($_etype314, $_size311); - for ($_i315 = 0; $_i315 < $_size311; ++$_i315) + $_size318 = 0; + $_etype321 = 0; + $xfer += $input->readListBegin($_etype321, $_size318); + for ($_i322 = 0; $_i322 < $_size318; ++$_i322) { - $elem316 = null; - $elem316 = new \metastore\SQLCheckConstraint(); - $xfer += $elem316->read($input); - $this->checkConstraints []= $elem316; + $elem323 = null; + $elem323 = new \metastore\SQLCheckConstraint(); + $xfer += $elem323->read($input); + $this->checkConstraints []= $elem323; } $xfer += $input->readListEnd(); } else { @@ -10814,9 +11760,9 @@ class CheckConstraintsResponse { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); { - foreach ($this->checkConstraints as $iter317) + foreach ($this->checkConstraints as $iter324) { - $xfer += $iter317->write($output); + $xfer += $iter324->write($output); } } $output->writeListEnd(); @@ -10845,6 +11791,10 @@ class DropConstraintRequest { * @var string */ public $constraintname = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10861,6 +11811,10 @@ class DropConstraintRequest { 'var' => 'constraintname', 'type' => TType::STRING, ), + 4 => array( 
+ 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -10873,6 +11827,9 @@ class DropConstraintRequest { if (isset($vals['constraintname'])) { $this->constraintname = $vals['constraintname']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -10909,9 +11866,16 @@ class DropConstraintRequest { $xfer += $input->skip($ftype); } break; - case 3: + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->constraintname); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->constraintname); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } @@ -10944,6 +11908,11 @@ class DropConstraintRequest { $xfer += $output->writeString($this->constraintname); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -11002,15 +11971,15 @@ class AddPrimaryKeyRequest { case 1: if ($ftype == TType::LST) { $this->primaryKeyCols = array(); - $_size318 = 0; - $_etype321 = 0; - $xfer += $input->readListBegin($_etype321, $_size318); - for ($_i322 = 0; $_i322 < $_size318; ++$_i322) + $_size325 = 0; + $_etype328 = 0; + $xfer += $input->readListBegin($_etype328, $_size325); + for ($_i329 = 0; $_i329 < $_size325; ++$_i329) { - $elem323 = null; - $elem323 = new \metastore\SQLPrimaryKey(); - $xfer += $elem323->read($input); - $this->primaryKeyCols []= $elem323; + $elem330 = null; + $elem330 = new \metastore\SQLPrimaryKey(); + $xfer += $elem330->read($input); + $this->primaryKeyCols []= $elem330; } $xfer += $input->readListEnd(); } else { @@ -11038,9 +12007,9 @@ class AddPrimaryKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeyCols)); { - foreach ($this->primaryKeyCols as $iter324) + foreach ($this->primaryKeyCols as $iter331) { - $xfer += $iter324->write($output); + $xfer += $iter331->write($output); } } $output->writeListEnd(); @@ -11105,15 +12074,15 @@ class AddForeignKeyRequest { case 1: if ($ftype == TType::LST) { $this->foreignKeyCols = array(); - $_size325 = 0; - $_etype328 = 0; - $xfer += $input->readListBegin($_etype328, $_size325); - for ($_i329 = 0; $_i329 < $_size325; ++$_i329) + $_size332 = 0; + $_etype335 = 0; + $xfer += $input->readListBegin($_etype335, $_size332); + for ($_i336 = 0; $_i336 < $_size332; ++$_i336) { - $elem330 = null; - $elem330 = new \metastore\SQLForeignKey(); - $xfer += $elem330->read($input); - $this->foreignKeyCols []= $elem330; + $elem337 = null; + $elem337 = new \metastore\SQLForeignKey(); + $xfer += $elem337->read($input); + $this->foreignKeyCols []= $elem337; } $xfer += $input->readListEnd(); } else { @@ -11141,9 +12110,9 @@ class AddForeignKeyRequest { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeyCols)); { - foreach ($this->foreignKeyCols as $iter331) + foreach ($this->foreignKeyCols as $iter338) { - $xfer += $iter331->write($output); + $xfer += $iter338->write($output); } } $output->writeListEnd(); @@ -11208,15 +12177,15 @@ class AddUniqueConstraintRequest { case 1: if ($ftype == TType::LST) { $this->uniqueConstraintCols = array(); - $_size332 = 0; - $_etype335 = 0; - $xfer += $input->readListBegin($_etype335, $_size332); - for ($_i336 = 0; $_i336 < $_size332; ++$_i336) + $_size339 
= 0; + $_etype342 = 0; + $xfer += $input->readListBegin($_etype342, $_size339); + for ($_i343 = 0; $_i343 < $_size339; ++$_i343) { - $elem337 = null; - $elem337 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem337->read($input); - $this->uniqueConstraintCols []= $elem337; + $elem344 = null; + $elem344 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem344->read($input); + $this->uniqueConstraintCols []= $elem344; } $xfer += $input->readListEnd(); } else { @@ -11244,9 +12213,9 @@ class AddUniqueConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraintCols)); { - foreach ($this->uniqueConstraintCols as $iter338) + foreach ($this->uniqueConstraintCols as $iter345) { - $xfer += $iter338->write($output); + $xfer += $iter345->write($output); } } $output->writeListEnd(); @@ -11311,15 +12280,15 @@ class AddNotNullConstraintRequest { case 1: if ($ftype == TType::LST) { $this->notNullConstraintCols = array(); - $_size339 = 0; - $_etype342 = 0; - $xfer += $input->readListBegin($_etype342, $_size339); - for ($_i343 = 0; $_i343 < $_size339; ++$_i343) + $_size346 = 0; + $_etype349 = 0; + $xfer += $input->readListBegin($_etype349, $_size346); + for ($_i350 = 0; $_i350 < $_size346; ++$_i350) { - $elem344 = null; - $elem344 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem344->read($input); - $this->notNullConstraintCols []= $elem344; + $elem351 = null; + $elem351 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem351->read($input); + $this->notNullConstraintCols []= $elem351; } $xfer += $input->readListEnd(); } else { @@ -11347,9 +12316,9 @@ class AddNotNullConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraintCols)); { - foreach ($this->notNullConstraintCols as $iter345) + foreach ($this->notNullConstraintCols as $iter352) { - $xfer += $iter345->write($output); + $xfer += $iter352->write($output); } } $output->writeListEnd(); @@ -11414,15 +12383,15 @@ class AddDefaultConstraintRequest { case 1: if ($ftype == TType::LST) { $this->defaultConstraintCols = array(); - $_size346 = 0; - $_etype349 = 0; - $xfer += $input->readListBegin($_etype349, $_size346); - for ($_i350 = 0; $_i350 < $_size346; ++$_i350) + $_size353 = 0; + $_etype356 = 0; + $xfer += $input->readListBegin($_etype356, $_size353); + for ($_i357 = 0; $_i357 < $_size353; ++$_i357) { - $elem351 = null; - $elem351 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem351->read($input); - $this->defaultConstraintCols []= $elem351; + $elem358 = null; + $elem358 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem358->read($input); + $this->defaultConstraintCols []= $elem358; } $xfer += $input->readListEnd(); } else { @@ -11450,9 +12419,9 @@ class AddDefaultConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraintCols)); { - foreach ($this->defaultConstraintCols as $iter352) + foreach ($this->defaultConstraintCols as $iter359) { - $xfer += $iter352->write($output); + $xfer += $iter359->write($output); } } $output->writeListEnd(); @@ -11517,15 +12486,15 @@ class AddCheckConstraintRequest { case 1: if ($ftype == TType::LST) { $this->checkConstraintCols = array(); - $_size353 = 0; - $_etype356 = 0; - $xfer += $input->readListBegin($_etype356, $_size353); - for ($_i357 = 0; $_i357 < $_size353; ++$_i357) + $_size360 = 0; + $_etype363 = 0; + $xfer += $input->readListBegin($_etype363, $_size360); + for ($_i364 = 0; $_i364 < $_size360; ++$_i364) { - $elem358 = null; - $elem358 = new \metastore\SQLCheckConstraint(); - $xfer 
+= $elem358->read($input); - $this->checkConstraintCols []= $elem358; + $elem365 = null; + $elem365 = new \metastore\SQLCheckConstraint(); + $xfer += $elem365->read($input); + $this->checkConstraintCols []= $elem365; } $xfer += $input->readListEnd(); } else { @@ -11553,9 +12522,9 @@ class AddCheckConstraintRequest { { $output->writeListBegin(TType::STRUCT, count($this->checkConstraintCols)); { - foreach ($this->checkConstraintCols as $iter359) + foreach ($this->checkConstraintCols as $iter366) { - $xfer += $iter359->write($output); + $xfer += $iter366->write($output); } } $output->writeListEnd(); @@ -11631,15 +12600,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size360 = 0; - $_etype363 = 0; - $xfer += $input->readListBegin($_etype363, $_size360); - for ($_i364 = 0; $_i364 < $_size360; ++$_i364) + $_size367 = 0; + $_etype370 = 0; + $xfer += $input->readListBegin($_etype370, $_size367); + for ($_i371 = 0; $_i371 < $_size367; ++$_i371) { - $elem365 = null; - $elem365 = new \metastore\Partition(); - $xfer += $elem365->read($input); - $this->partitions []= $elem365; + $elem372 = null; + $elem372 = new \metastore\Partition(); + $xfer += $elem372->read($input); + $this->partitions []= $elem372; } $xfer += $input->readListEnd(); } else { @@ -11674,9 +12643,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter366) + foreach ($this->partitions as $iter373) { - $xfer += $iter366->write($output); + $xfer += $iter373->write($output); } } $output->writeListEnd(); @@ -11718,6 +12687,10 @@ class PartitionsByExprRequest { * @var int */ public $maxParts = -1; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11742,6 +12715,10 @@ class PartitionsByExprRequest { 'var' => 'maxParts', 'type' => TType::I16, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -11760,6 +12737,9 @@ class PartitionsByExprRequest { if (isset($vals['maxParts'])) { $this->maxParts = $vals['maxParts']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -11817,6 +12797,13 @@ class PartitionsByExprRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -11855,6 +12842,11 @@ class PartitionsByExprRequest { $xfer += $output->writeI16($this->maxParts); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -11913,15 +12905,15 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size367 = 0; - $_etype370 = 0; - $xfer += $input->readListBegin($_etype370, $_size367); - for ($_i371 = 0; $_i371 < $_size367; ++$_i371) + $_size374 = 0; + $_etype377 = 0; + $xfer += $input->readListBegin($_etype377, $_size374); + for ($_i378 = 0; $_i378 < $_size374; ++$_i378) { - $elem372 = null; - $elem372 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem372->read($input); - $this->tableStats []= $elem372; + $elem379 = null; + $elem379 = new 
\metastore\ColumnStatisticsObj(); + $xfer += $elem379->read($input); + $this->tableStats []= $elem379; } $xfer += $input->readListEnd(); } else { @@ -11949,9 +12941,9 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter373) + foreach ($this->tableStats as $iter380) { - $xfer += $iter373->write($output); + $xfer += $iter380->write($output); } } $output->writeListEnd(); @@ -12024,28 +13016,28 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size374 = 0; - $_ktype375 = 0; - $_vtype376 = 0; - $xfer += $input->readMapBegin($_ktype375, $_vtype376, $_size374); - for ($_i378 = 0; $_i378 < $_size374; ++$_i378) + $_size381 = 0; + $_ktype382 = 0; + $_vtype383 = 0; + $xfer += $input->readMapBegin($_ktype382, $_vtype383, $_size381); + for ($_i385 = 0; $_i385 < $_size381; ++$_i385) { - $key379 = ''; - $val380 = array(); - $xfer += $input->readString($key379); - $val380 = array(); - $_size381 = 0; - $_etype384 = 0; - $xfer += $input->readListBegin($_etype384, $_size381); - for ($_i385 = 0; $_i385 < $_size381; ++$_i385) + $key386 = ''; + $val387 = array(); + $xfer += $input->readString($key386); + $val387 = array(); + $_size388 = 0; + $_etype391 = 0; + $xfer += $input->readListBegin($_etype391, $_size388); + for ($_i392 = 0; $_i392 < $_size388; ++$_i392) { - $elem386 = null; - $elem386 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem386->read($input); - $val380 []= $elem386; + $elem393 = null; + $elem393 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem393->read($input); + $val387 []= $elem393; } $xfer += $input->readListEnd(); - $this->partStats[$key379] = $val380; + $this->partStats[$key386] = $val387; } $xfer += $input->readMapEnd(); } else { @@ -12073,15 +13065,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter387 => $viter388) + foreach ($this->partStats as $kiter394 => $viter395) { - $xfer += $output->writeString($kiter387); + $xfer += $output->writeString($kiter394); { - $output->writeListBegin(TType::STRUCT, count($viter388)); + $output->writeListBegin(TType::STRUCT, count($viter395)); { - foreach ($viter388 as $iter389) + foreach ($viter395 as $iter396) { - $xfer += $iter389->write($output); + $xfer += $iter396->write($output); } } $output->writeListEnd(); @@ -12114,6 +13106,10 @@ class TableStatsRequest { * @var string[] */ public $colNames = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12134,6 +13130,10 @@ class TableStatsRequest { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12146,6 +13146,9 @@ class TableStatsRequest { if (isset($vals['colNames'])) { $this->colNames = $vals['colNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -12185,20 +13188,27 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size390 = 0; - $_etype393 = 0; - $xfer += $input->readListBegin($_etype393, $_size390); - for ($_i394 = 0; $_i394 < $_size390; ++$_i394) + $_size397 = 0; + $_etype400 = 0; + $xfer += $input->readListBegin($_etype400, $_size397); + for ($_i401 = 0; $_i401 < $_size397; ++$_i401) { - $elem395 = null; - $xfer += $input->readString($elem395); - $this->colNames []= $elem395; + $elem402 = null; + $xfer 
+= $input->readString($elem402); + $this->colNames []= $elem402; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -12230,15 +13240,20 @@ class TableStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter396) + foreach ($this->colNames as $iter403) { - $xfer += $output->writeString($iter396); + $xfer += $output->writeString($iter403); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12265,6 +13280,10 @@ class PartitionsStatsRequest { * @var string[] */ public $partNames = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12293,6 +13312,10 @@ class PartitionsStatsRequest { 'type' => TType::STRING, ), ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12308,6 +13331,9 @@ class PartitionsStatsRequest { if (isset($vals['partNames'])) { $this->partNames = $vals['partNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -12347,14 +13373,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size397 = 0; - $_etype400 = 0; - $xfer += $input->readListBegin($_etype400, $_size397); - for ($_i401 = 0; $_i401 < $_size397; ++$_i401) + $_size404 = 0; + $_etype407 = 0; + $xfer += $input->readListBegin($_etype407, $_size404); + for ($_i408 = 0; $_i408 < $_size404; ++$_i408) { - $elem402 = null; - $xfer += $input->readString($elem402); - $this->colNames []= $elem402; + $elem409 = null; + $xfer += $input->readString($elem409); + $this->colNames []= $elem409; } $xfer += $input->readListEnd(); } else { @@ -12364,20 +13390,27 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size403 = 0; - $_etype406 = 0; - $xfer += $input->readListBegin($_etype406, $_size403); - for ($_i407 = 0; $_i407 < $_size403; ++$_i407) + $_size410 = 0; + $_etype413 = 0; + $xfer += $input->readListBegin($_etype413, $_size410); + for ($_i414 = 0; $_i414 < $_size410; ++$_i414) { - $elem408 = null; - $xfer += $input->readString($elem408); - $this->partNames []= $elem408; + $elem415 = null; + $xfer += $input->readString($elem415); + $this->partNames []= $elem415; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -12409,9 +13442,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter409) + foreach ($this->colNames as $iter416) { - $xfer += $output->writeString($iter409); + $xfer += $output->writeString($iter416); } } $output->writeListEnd(); @@ -12426,15 +13459,20 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach 
($this->partNames as $iter410) + foreach ($this->partNames as $iter417) { - $xfer += $output->writeString($iter410); + $xfer += $output->writeString($iter417); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12493,15 +13531,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size411 = 0; - $_etype414 = 0; - $xfer += $input->readListBegin($_etype414, $_size411); - for ($_i415 = 0; $_i415 < $_size411; ++$_i415) + $_size418 = 0; + $_etype421 = 0; + $xfer += $input->readListBegin($_etype421, $_size418); + for ($_i422 = 0; $_i422 < $_size418; ++$_i422) { - $elem416 = null; - $elem416 = new \metastore\Partition(); - $xfer += $elem416->read($input); - $this->partitions []= $elem416; + $elem423 = null; + $elem423 = new \metastore\Partition(); + $xfer += $elem423->read($input); + $this->partitions []= $elem423; } $xfer += $input->readListEnd(); } else { @@ -12529,9 +13567,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter417) + foreach ($this->partitions as $iter424) { - $xfer += $iter417->write($output); + $xfer += $iter424->write($output); } } $output->writeListEnd(); @@ -12568,6 +13606,10 @@ class AddPartitionsRequest { * @var bool */ public $needResult = true; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12597,6 +13639,10 @@ class AddPartitionsRequest { 'var' => 'needResult', 'type' => TType::BOOL, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12615,6 +13661,9 @@ class AddPartitionsRequest { if (isset($vals['needResult'])) { $this->needResult = $vals['needResult']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -12654,15 +13703,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size418 = 0; - $_etype421 = 0; - $xfer += $input->readListBegin($_etype421, $_size418); - for ($_i422 = 0; $_i422 < $_size418; ++$_i422) + $_size425 = 0; + $_etype428 = 0; + $xfer += $input->readListBegin($_etype428, $_size425); + for ($_i429 = 0; $_i429 < $_size425; ++$_i429) { - $elem423 = null; - $elem423 = new \metastore\Partition(); - $xfer += $elem423->read($input); - $this->parts []= $elem423; + $elem430 = null; + $elem430 = new \metastore\Partition(); + $xfer += $elem430->read($input); + $this->parts []= $elem430; } $xfer += $input->readListEnd(); } else { @@ -12683,6 +13732,13 @@ class AddPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -12714,9 +13770,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter424) + foreach ($this->parts as $iter431) { - $xfer += $iter424->write($output); + $xfer += $iter431->write($output); } } $output->writeListEnd(); @@ -12733,6 +13789,11 @@ class AddPartitionsRequest { $xfer += $output->writeBool($this->needResult); $xfer += 
$output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12791,15 +13852,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size425 = 0; - $_etype428 = 0; - $xfer += $input->readListBegin($_etype428, $_size425); - for ($_i429 = 0; $_i429 < $_size425; ++$_i429) + $_size432 = 0; + $_etype435 = 0; + $xfer += $input->readListBegin($_etype435, $_size432); + for ($_i436 = 0; $_i436 < $_size432; ++$_i436) { - $elem430 = null; - $elem430 = new \metastore\Partition(); - $xfer += $elem430->read($input); - $this->partitions []= $elem430; + $elem437 = null; + $elem437 = new \metastore\Partition(); + $xfer += $elem437->read($input); + $this->partitions []= $elem437; } $xfer += $input->readListEnd(); } else { @@ -12827,9 +13888,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter431) + foreach ($this->partitions as $iter438) { - $xfer += $iter431->write($output); + $xfer += $iter438->write($output); } } $output->writeListEnd(); @@ -13007,14 +14068,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size432 = 0; - $_etype435 = 0; - $xfer += $input->readListBegin($_etype435, $_size432); - for ($_i436 = 0; $_i436 < $_size432; ++$_i436) + $_size439 = 0; + $_etype442 = 0; + $xfer += $input->readListBegin($_etype442, $_size439); + for ($_i443 = 0; $_i443 < $_size439; ++$_i443) { - $elem437 = null; - $xfer += $input->readString($elem437); - $this->names []= $elem437; + $elem444 = null; + $xfer += $input->readString($elem444); + $this->names []= $elem444; } $xfer += $input->readListEnd(); } else { @@ -13024,15 +14085,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size438 = 0; - $_etype441 = 0; - $xfer += $input->readListBegin($_etype441, $_size438); - for ($_i442 = 0; $_i442 < $_size438; ++$_i442) + $_size445 = 0; + $_etype448 = 0; + $xfer += $input->readListBegin($_etype448, $_size445); + for ($_i449 = 0; $_i449 < $_size445; ++$_i449) { - $elem443 = null; - $elem443 = new \metastore\DropPartitionsExpr(); - $xfer += $elem443->read($input); - $this->exprs []= $elem443; + $elem450 = null; + $elem450 = new \metastore\DropPartitionsExpr(); + $xfer += $elem450->read($input); + $this->exprs []= $elem450; } $xfer += $input->readListEnd(); } else { @@ -13060,9 +14121,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter444) + foreach ($this->names as $iter451) { - $xfer += $output->writeString($iter444); + $xfer += $output->writeString($iter451); } } $output->writeListEnd(); @@ -13077,9 +14138,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter445) + foreach ($this->exprs as $iter452) { - $xfer += $iter445->write($output); + $xfer += $iter452->write($output); } } $output->writeListEnd(); @@ -13128,6 +14189,10 @@ class DropPartitionsRequest { * @var bool */ public $needResult = true; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13166,6 +14231,10 @@ class DropPartitionsRequest { 'var' => 'needResult', 'type' => 
TType::BOOL, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13193,6 +14262,9 @@ class DropPartitionsRequest { if (isset($vals['needResult'])) { $this->needResult = $vals['needResult']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -13273,6 +14345,13 @@ class DropPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13332,6 +14411,11 @@ class DropPartitionsRequest { $xfer += $output->writeBool($this->needResult); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13374,6 +14458,10 @@ class PartitionValuesRequest { * @var int */ public $maxParts = -1; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13420,6 +14508,10 @@ class PartitionValuesRequest { 'var' => 'maxParts', 'type' => TType::I64, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13447,6 +14539,9 @@ class PartitionValuesRequest { if (isset($vals['maxParts'])) { $this->maxParts = $vals['maxParts']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -13486,15 +14581,15 @@ class PartitionValuesRequest { case 3: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size446 = 0; - $_etype449 = 0; - $xfer += $input->readListBegin($_etype449, $_size446); - for ($_i450 = 0; $_i450 < $_size446; ++$_i450) + $_size453 = 0; + $_etype456 = 0; + $xfer += $input->readListBegin($_etype456, $_size453); + for ($_i457 = 0; $_i457 < $_size453; ++$_i457) { - $elem451 = null; - $elem451 = new \metastore\FieldSchema(); - $xfer += $elem451->read($input); - $this->partitionKeys []= $elem451; + $elem458 = null; + $elem458 = new \metastore\FieldSchema(); + $xfer += $elem458->read($input); + $this->partitionKeys []= $elem458; } $xfer += $input->readListEnd(); } else { @@ -13518,15 +14613,15 @@ class PartitionValuesRequest { case 6: if ($ftype == TType::LST) { $this->partitionOrder = array(); - $_size452 = 0; - $_etype455 = 0; - $xfer += $input->readListBegin($_etype455, $_size452); - for ($_i456 = 0; $_i456 < $_size452; ++$_i456) + $_size459 = 0; + $_etype462 = 0; + $xfer += $input->readListBegin($_etype462, $_size459); + for ($_i463 = 0; $_i463 < $_size459; ++$_i463) { - $elem457 = null; - $elem457 = new \metastore\FieldSchema(); - $xfer += $elem457->read($input); - $this->partitionOrder []= $elem457; + $elem464 = null; + $elem464 = new \metastore\FieldSchema(); + $xfer += $elem464->read($input); + $this->partitionOrder []= $elem464; } $xfer += $input->readListEnd(); } else { @@ -13547,6 +14642,13 @@ class PartitionValuesRequest { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13578,9 +14680,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter458) + 
foreach ($this->partitionKeys as $iter465) { - $xfer += $iter458->write($output); + $xfer += $iter465->write($output); } } $output->writeListEnd(); @@ -13605,9 +14707,9 @@ class PartitionValuesRequest { { $output->writeListBegin(TType::STRUCT, count($this->partitionOrder)); { - foreach ($this->partitionOrder as $iter459) + foreach ($this->partitionOrder as $iter466) { - $xfer += $iter459->write($output); + $xfer += $iter466->write($output); } } $output->writeListEnd(); @@ -13624,6 +14726,11 @@ class PartitionValuesRequest { $xfer += $output->writeI64($this->maxParts); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13681,14 +14788,14 @@ class PartitionValuesRow { case 1: if ($ftype == TType::LST) { $this->row = array(); - $_size460 = 0; - $_etype463 = 0; - $xfer += $input->readListBegin($_etype463, $_size460); - for ($_i464 = 0; $_i464 < $_size460; ++$_i464) + $_size467 = 0; + $_etype470 = 0; + $xfer += $input->readListBegin($_etype470, $_size467); + for ($_i471 = 0; $_i471 < $_size467; ++$_i471) { - $elem465 = null; - $xfer += $input->readString($elem465); - $this->row []= $elem465; + $elem472 = null; + $xfer += $input->readString($elem472); + $this->row []= $elem472; } $xfer += $input->readListEnd(); } else { @@ -13716,9 +14823,9 @@ class PartitionValuesRow { { $output->writeListBegin(TType::STRING, count($this->row)); { - foreach ($this->row as $iter466) + foreach ($this->row as $iter473) { - $xfer += $output->writeString($iter466); + $xfer += $output->writeString($iter473); } } $output->writeListEnd(); @@ -13783,15 +14890,15 @@ class PartitionValuesResponse { case 1: if ($ftype == TType::LST) { $this->partitionValues = array(); - $_size467 = 0; - $_etype470 = 0; - $xfer += $input->readListBegin($_etype470, $_size467); - for ($_i471 = 0; $_i471 < $_size467; ++$_i471) + $_size474 = 0; + $_etype477 = 0; + $xfer += $input->readListBegin($_etype477, $_size474); + for ($_i478 = 0; $_i478 < $_size474; ++$_i478) { - $elem472 = null; - $elem472 = new \metastore\PartitionValuesRow(); - $xfer += $elem472->read($input); - $this->partitionValues []= $elem472; + $elem479 = null; + $elem479 = new \metastore\PartitionValuesRow(); + $xfer += $elem479->read($input); + $this->partitionValues []= $elem479; } $xfer += $input->readListEnd(); } else { @@ -13819,9 +14926,9 @@ class PartitionValuesResponse { { $output->writeListBegin(TType::STRUCT, count($this->partitionValues)); { - foreach ($this->partitionValues as $iter473) + foreach ($this->partitionValues as $iter480) { - $xfer += $iter473->write($output); + $xfer += $iter480->write($output); } } $output->writeListEnd(); @@ -13968,6 +15075,10 @@ class Function { * @var \metastore\ResourceUri[] */ public $resourceUris = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -14009,6 +15120,10 @@ class Function { 'class' => '\metastore\ResourceUri', ), ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -14036,6 +15151,9 @@ class Function { if (isset($vals['resourceUris'])) { $this->resourceUris = $vals['resourceUris']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -14110,21 +15228,28 @@ class Function { case 8: if ($ftype == 
TType::LST) { $this->resourceUris = array(); - $_size474 = 0; - $_etype477 = 0; - $xfer += $input->readListBegin($_etype477, $_size474); - for ($_i478 = 0; $_i478 < $_size474; ++$_i478) + $_size481 = 0; + $_etype484 = 0; + $xfer += $input->readListBegin($_etype484, $_size481); + for ($_i485 = 0; $_i485 < $_size481; ++$_i485) { - $elem479 = null; - $elem479 = new \metastore\ResourceUri(); - $xfer += $elem479->read($input); - $this->resourceUris []= $elem479; + $elem486 = null; + $elem486 = new \metastore\ResourceUri(); + $xfer += $elem486->read($input); + $this->resourceUris []= $elem486; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -14181,15 +15306,20 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter480) + foreach ($this->resourceUris as $iter487) { - $xfer += $iter480->write($output); + $xfer += $iter487->write($output); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -14525,15 +15655,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size481 = 0; - $_etype484 = 0; - $xfer += $input->readListBegin($_etype484, $_size481); - for ($_i485 = 0; $_i485 < $_size481; ++$_i485) + $_size488 = 0; + $_etype491 = 0; + $xfer += $input->readListBegin($_etype491, $_size488); + for ($_i492 = 0; $_i492 < $_size488; ++$_i492) { - $elem486 = null; - $elem486 = new \metastore\TxnInfo(); - $xfer += $elem486->read($input); - $this->open_txns []= $elem486; + $elem493 = null; + $elem493 = new \metastore\TxnInfo(); + $xfer += $elem493->read($input); + $this->open_txns []= $elem493; } $xfer += $input->readListEnd(); } else { @@ -14566,9 +15696,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter487) + foreach ($this->open_txns as $iter494) { - $xfer += $iter487->write($output); + $xfer += $iter494->write($output); } } $output->writeListEnd(); @@ -14672,14 +15802,14 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size488 = 0; - $_etype491 = 0; - $xfer += $input->readListBegin($_etype491, $_size488); - for ($_i492 = 0; $_i492 < $_size488; ++$_i492) + $_size495 = 0; + $_etype498 = 0; + $xfer += $input->readListBegin($_etype498, $_size495); + for ($_i499 = 0; $_i499 < $_size495; ++$_i499) { - $elem493 = null; - $xfer += $input->readI64($elem493); - $this->open_txns []= $elem493; + $elem500 = null; + $xfer += $input->readI64($elem500); + $this->open_txns []= $elem500; } $xfer += $input->readListEnd(); } else { @@ -14726,9 +15856,9 @@ class GetOpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter494) + foreach ($this->open_txns as $iter501) { - $xfer += $output->writeI64($iter494); + $xfer += $output->writeI64($iter501); } } $output->writeListEnd(); @@ -14946,14 +16076,14 @@ class OpenTxnsResponse { case 1: if ($ftype == TType::LST) { $this->txn_ids = 
array(); - $_size495 = 0; - $_etype498 = 0; - $xfer += $input->readListBegin($_etype498, $_size495); - for ($_i499 = 0; $_i499 < $_size495; ++$_i499) + $_size502 = 0; + $_etype505 = 0; + $xfer += $input->readListBegin($_etype505, $_size502); + for ($_i506 = 0; $_i506 < $_size502; ++$_i506) { - $elem500 = null; - $xfer += $input->readI64($elem500); - $this->txn_ids []= $elem500; + $elem507 = null; + $xfer += $input->readI64($elem507); + $this->txn_ids []= $elem507; } $xfer += $input->readListEnd(); } else { @@ -14981,9 +16111,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter501) + foreach ($this->txn_ids as $iter508) { - $xfer += $output->writeI64($iter501); + $xfer += $output->writeI64($iter508); } } $output->writeListEnd(); @@ -15122,14 +16252,14 @@ class AbortTxnsRequest { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size502 = 0; - $_etype505 = 0; - $xfer += $input->readListBegin($_etype505, $_size502); - for ($_i506 = 0; $_i506 < $_size502; ++$_i506) + $_size509 = 0; + $_etype512 = 0; + $xfer += $input->readListBegin($_etype512, $_size509); + for ($_i513 = 0; $_i513 < $_size509; ++$_i513) { - $elem507 = null; - $xfer += $input->readI64($elem507); - $this->txn_ids []= $elem507; + $elem514 = null; + $xfer += $input->readI64($elem514); + $this->txn_ids []= $elem514; } $xfer += $input->readListEnd(); } else { @@ -15157,9 +16287,9 @@ class AbortTxnsRequest { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter508) + foreach ($this->txn_ids as $iter515) { - $xfer += $output->writeI64($iter508); + $xfer += $output->writeI64($iter515); } } $output->writeListEnd(); @@ -15309,14 +16439,14 @@ class GetValidWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->fullTableNames = array(); - $_size509 = 0; - $_etype512 = 0; - $xfer += $input->readListBegin($_etype512, $_size509); - for ($_i513 = 0; $_i513 < $_size509; ++$_i513) + $_size516 = 0; + $_etype519 = 0; + $xfer += $input->readListBegin($_etype519, $_size516); + for ($_i520 = 0; $_i520 < $_size516; ++$_i520) { - $elem514 = null; - $xfer += $input->readString($elem514); - $this->fullTableNames []= $elem514; + $elem521 = null; + $xfer += $input->readString($elem521); + $this->fullTableNames []= $elem521; } $xfer += $input->readListEnd(); } else { @@ -15351,9 +16481,9 @@ class GetValidWriteIdsRequest { { $output->writeListBegin(TType::STRING, count($this->fullTableNames)); { - foreach ($this->fullTableNames as $iter515) + foreach ($this->fullTableNames as $iter522) { - $xfer += $output->writeString($iter515); + $xfer += $output->writeString($iter522); } } $output->writeListEnd(); @@ -15480,14 +16610,14 @@ class TableValidWriteIds { case 3: if ($ftype == TType::LST) { $this->invalidWriteIds = array(); - $_size516 = 0; - $_etype519 = 0; - $xfer += $input->readListBegin($_etype519, $_size516); - for ($_i520 = 0; $_i520 < $_size516; ++$_i520) + $_size523 = 0; + $_etype526 = 0; + $xfer += $input->readListBegin($_etype526, $_size523); + for ($_i527 = 0; $_i527 < $_size523; ++$_i527) { - $elem521 = null; - $xfer += $input->readI64($elem521); - $this->invalidWriteIds []= $elem521; + $elem528 = null; + $xfer += $input->readI64($elem528); + $this->invalidWriteIds []= $elem528; } $xfer += $input->readListEnd(); } else { @@ -15539,9 +16669,9 @@ class TableValidWriteIds { { $output->writeListBegin(TType::I64, count($this->invalidWriteIds)); { - foreach ($this->invalidWriteIds as $iter522) + foreach 
($this->invalidWriteIds as $iter529) { - $xfer += $output->writeI64($iter522); + $xfer += $output->writeI64($iter529); } } $output->writeListEnd(); @@ -15616,15 +16746,15 @@ class GetValidWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->tblValidWriteIds = array(); - $_size523 = 0; - $_etype526 = 0; - $xfer += $input->readListBegin($_etype526, $_size523); - for ($_i527 = 0; $_i527 < $_size523; ++$_i527) + $_size530 = 0; + $_etype533 = 0; + $xfer += $input->readListBegin($_etype533, $_size530); + for ($_i534 = 0; $_i534 < $_size530; ++$_i534) { - $elem528 = null; - $elem528 = new \metastore\TableValidWriteIds(); - $xfer += $elem528->read($input); - $this->tblValidWriteIds []= $elem528; + $elem535 = null; + $elem535 = new \metastore\TableValidWriteIds(); + $xfer += $elem535->read($input); + $this->tblValidWriteIds []= $elem535; } $xfer += $input->readListEnd(); } else { @@ -15652,9 +16782,9 @@ class GetValidWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->tblValidWriteIds)); { - foreach ($this->tblValidWriteIds as $iter529) + foreach ($this->tblValidWriteIds as $iter536) { - $xfer += $iter529->write($output); + $xfer += $iter536->write($output); } } $output->writeListEnd(); @@ -15740,14 +16870,14 @@ class AllocateTableWriteIdsRequest { case 1: if ($ftype == TType::LST) { $this->txnIds = array(); - $_size530 = 0; - $_etype533 = 0; - $xfer += $input->readListBegin($_etype533, $_size530); - for ($_i534 = 0; $_i534 < $_size530; ++$_i534) + $_size537 = 0; + $_etype540 = 0; + $xfer += $input->readListBegin($_etype540, $_size537); + for ($_i541 = 0; $_i541 < $_size537; ++$_i541) { - $elem535 = null; - $xfer += $input->readI64($elem535); - $this->txnIds []= $elem535; + $elem542 = null; + $xfer += $input->readI64($elem542); + $this->txnIds []= $elem542; } $xfer += $input->readListEnd(); } else { @@ -15789,9 +16919,9 @@ class AllocateTableWriteIdsRequest { { $output->writeListBegin(TType::I64, count($this->txnIds)); { - foreach ($this->txnIds as $iter536) + foreach ($this->txnIds as $iter543) { - $xfer += $output->writeI64($iter536); + $xfer += $output->writeI64($iter543); } } $output->writeListEnd(); @@ -15964,15 +17094,15 @@ class AllocateTableWriteIdsResponse { case 1: if ($ftype == TType::LST) { $this->txnToWriteIds = array(); - $_size537 = 0; - $_etype540 = 0; - $xfer += $input->readListBegin($_etype540, $_size537); - for ($_i541 = 0; $_i541 < $_size537; ++$_i541) + $_size544 = 0; + $_etype547 = 0; + $xfer += $input->readListBegin($_etype547, $_size544); + for ($_i548 = 0; $_i548 < $_size544; ++$_i548) { - $elem542 = null; - $elem542 = new \metastore\TxnToWriteId(); - $xfer += $elem542->read($input); - $this->txnToWriteIds []= $elem542; + $elem549 = null; + $elem549 = new \metastore\TxnToWriteId(); + $xfer += $elem549->read($input); + $this->txnToWriteIds []= $elem549; } $xfer += $input->readListEnd(); } else { @@ -16000,9 +17130,9 @@ class AllocateTableWriteIdsResponse { { $output->writeListBegin(TType::STRUCT, count($this->txnToWriteIds)); { - foreach ($this->txnToWriteIds as $iter543) + foreach ($this->txnToWriteIds as $iter550) { - $xfer += $iter543->write($output); + $xfer += $iter550->write($output); } } $output->writeListEnd(); @@ -16347,15 +17477,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size544 = 0; - $_etype547 = 0; - $xfer += $input->readListBegin($_etype547, $_size544); - for ($_i548 = 0; $_i548 < $_size544; ++$_i548) + $_size551 = 0; + $_etype554 = 0; + $xfer += $input->readListBegin($_etype554, 
$_size551); + for ($_i555 = 0; $_i555 < $_size551; ++$_i555) { - $elem549 = null; - $elem549 = new \metastore\LockComponent(); - $xfer += $elem549->read($input); - $this->component []= $elem549; + $elem556 = null; + $elem556 = new \metastore\LockComponent(); + $xfer += $elem556->read($input); + $this->component []= $elem556; } $xfer += $input->readListEnd(); } else { @@ -16411,9 +17541,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter550) + foreach ($this->component as $iter557) { - $xfer += $iter550->write($output); + $xfer += $iter557->write($output); } } $output->writeListEnd(); @@ -17356,15 +18486,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size551 = 0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - for ($_i555 = 0; $_i555 < $_size551; ++$_i555) + $_size558 = 0; + $_etype561 = 0; + $xfer += $input->readListBegin($_etype561, $_size558); + for ($_i562 = 0; $_i562 < $_size558; ++$_i562) { - $elem556 = null; - $elem556 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem556->read($input); - $this->locks []= $elem556; + $elem563 = null; + $elem563 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem563->read($input); + $this->locks []= $elem563; } $xfer += $input->readListEnd(); } else { @@ -17392,9 +18522,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter557) + foreach ($this->locks as $iter564) { - $xfer += $iter557->write($output); + $xfer += $iter564->write($output); } } $output->writeListEnd(); @@ -17669,17 +18799,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readSetBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size565 = 0; + $_etype568 = 0; + $xfer += $input->readSetBegin($_etype568, $_size565); + for ($_i569 = 0; $_i569 < $_size565; ++$_i569) { - $elem563 = null; - $xfer += $input->readI64($elem563); - if (is_scalar($elem563)) { - $this->aborted[$elem563] = true; + $elem570 = null; + $xfer += $input->readI64($elem570); + if (is_scalar($elem570)) { + $this->aborted[$elem570] = true; } else { - $this->aborted []= $elem563; + $this->aborted []= $elem570; } } $xfer += $input->readSetEnd(); @@ -17690,17 +18820,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size564 = 0; - $_etype567 = 0; - $xfer += $input->readSetBegin($_etype567, $_size564); - for ($_i568 = 0; $_i568 < $_size564; ++$_i568) + $_size571 = 0; + $_etype574 = 0; + $xfer += $input->readSetBegin($_etype574, $_size571); + for ($_i575 = 0; $_i575 < $_size571; ++$_i575) { - $elem569 = null; - $xfer += $input->readI64($elem569); - if (is_scalar($elem569)) { - $this->nosuch[$elem569] = true; + $elem576 = null; + $xfer += $input->readI64($elem576); + if (is_scalar($elem576)) { + $this->nosuch[$elem576] = true; } else { - $this->nosuch []= $elem569; + $this->nosuch []= $elem576; } } $xfer += $input->readSetEnd(); @@ -17729,12 +18859,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter570 => $iter571) + foreach ($this->aborted as $iter577 => $iter578) { - if (is_scalar($iter571)) { - $xfer += $output->writeI64($iter570); + if (is_scalar($iter578)) { + $xfer += $output->writeI64($iter577); } else { 
- $xfer += $output->writeI64($iter571); + $xfer += $output->writeI64($iter578); } } } @@ -17750,12 +18880,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter572 => $iter573) + foreach ($this->nosuch as $iter579 => $iter580) { - if (is_scalar($iter573)) { - $xfer += $output->writeI64($iter572); + if (is_scalar($iter580)) { + $xfer += $output->writeI64($iter579); } else { - $xfer += $output->writeI64($iter573); + $xfer += $output->writeI64($iter580); } } } @@ -17914,17 +19044,17 @@ class CompactionRequest { case 6: if ($ftype == TType::MAP) { $this->properties = array(); - $_size574 = 0; - $_ktype575 = 0; - $_vtype576 = 0; - $xfer += $input->readMapBegin($_ktype575, $_vtype576, $_size574); - for ($_i578 = 0; $_i578 < $_size574; ++$_i578) + $_size581 = 0; + $_ktype582 = 0; + $_vtype583 = 0; + $xfer += $input->readMapBegin($_ktype582, $_vtype583, $_size581); + for ($_i585 = 0; $_i585 < $_size581; ++$_i585) { - $key579 = ''; - $val580 = ''; - $xfer += $input->readString($key579); - $xfer += $input->readString($val580); - $this->properties[$key579] = $val580; + $key586 = ''; + $val587 = ''; + $xfer += $input->readString($key586); + $xfer += $input->readString($val587); + $this->properties[$key586] = $val587; } $xfer += $input->readMapEnd(); } else { @@ -17977,10 +19107,10 @@ class CompactionRequest { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter581 => $viter582) + foreach ($this->properties as $kiter588 => $viter589) { - $xfer += $output->writeString($kiter581); - $xfer += $output->writeString($viter582); + $xfer += $output->writeString($kiter588); + $xfer += $output->writeString($viter589); } } $output->writeMapEnd(); @@ -18567,15 +19697,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size583 = 0; - $_etype586 = 0; - $xfer += $input->readListBegin($_etype586, $_size583); - for ($_i587 = 0; $_i587 < $_size583; ++$_i587) + $_size590 = 0; + $_etype593 = 0; + $xfer += $input->readListBegin($_etype593, $_size590); + for ($_i594 = 0; $_i594 < $_size590; ++$_i594) { - $elem588 = null; - $elem588 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem588->read($input); - $this->compacts []= $elem588; + $elem595 = null; + $elem595 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem595->read($input); + $this->compacts []= $elem595; } $xfer += $input->readListEnd(); } else { @@ -18603,9 +19733,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter589) + foreach ($this->compacts as $iter596) { - $xfer += $iter589->write($output); + $xfer += $iter596->write($output); } } $output->writeListEnd(); @@ -18752,14 +19882,14 @@ class AddDynamicPartitions { case 5: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size590 = 0; - $_etype593 = 0; - $xfer += $input->readListBegin($_etype593, $_size590); - for ($_i594 = 0; $_i594 < $_size590; ++$_i594) + $_size597 = 0; + $_etype600 = 0; + $xfer += $input->readListBegin($_etype600, $_size597); + for ($_i601 = 0; $_i601 < $_size597; ++$_i601) { - $elem595 = null; - $xfer += $input->readString($elem595); - $this->partitionnames []= $elem595; + $elem602 = null; + $xfer += $input->readString($elem602); + $this->partitionnames []= $elem602; } $xfer += $input->readListEnd(); } else { @@ -18814,9 +19944,9 @@ class AddDynamicPartitions { { 
$output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter596) + foreach ($this->partitionnames as $iter603) { - $xfer += $output->writeString($iter596); + $xfer += $output->writeString($iter603); } } $output->writeListEnd(); @@ -19031,6 +20161,10 @@ class CreationMetadata { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var string @@ -19049,14 +20183,18 @@ class CreationMetadata { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbName', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'tblName', + 'var' => 'dbName', 'type' => TType::STRING, ), 3 => array( + 'var' => 'tblName', + 'type' => TType::STRING, + ), + 4 => array( 'var' => 'tablesUsed', 'type' => TType::SET, 'etype' => TType::STRING, @@ -19064,13 +20202,16 @@ class CreationMetadata { 'type' => TType::STRING, ), ), - 4 => array( + 5 => array( 'var' => 'validTxnList', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -19107,32 +20248,39 @@ class CreationMetadata { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tblName); + $xfer += $input->readString($this->dbName); } else { $xfer += $input->skip($ftype); } break; case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size597 = 0; - $_etype600 = 0; - $xfer += $input->readSetBegin($_etype600, $_size597); - for ($_i601 = 0; $_i601 < $_size597; ++$_i601) + $_size604 = 0; + $_etype607 = 0; + $xfer += $input->readSetBegin($_etype607, $_size604); + for ($_i608 = 0; $_i608 < $_size604; ++$_i608) { - $elem602 = null; - $xfer += $input->readString($elem602); - if (is_scalar($elem602)) { - $this->tablesUsed[$elem602] = true; + $elem609 = null; + $xfer += $input->readString($elem609); + if (is_scalar($elem609)) { + $this->tablesUsed[$elem609] = true; } else { - $this->tablesUsed []= $elem602; + $this->tablesUsed []= $elem609; } } $xfer += $input->readSetEnd(); @@ -19140,7 +20288,7 @@ class CreationMetadata { $xfer += $input->skip($ftype); } break; - case 4: + case 5: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validTxnList); } else { @@ -19160,13 +20308,18 @@ class CreationMetadata { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('CreationMetadata'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->tblName !== null) { - $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 3); $xfer += $output->writeString($this->tblName); $xfer += $output->writeFieldEnd(); } @@ -19174,16 +20327,16 @@ class CreationMetadata { if 
(!is_array($this->tablesUsed)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 3); + $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 4); { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter603 => $iter604) + foreach ($this->tablesUsed as $iter610 => $iter611) { - if (is_scalar($iter604)) { - $xfer += $output->writeString($iter603); + if (is_scalar($iter611)) { + $xfer += $output->writeString($iter610); } else { - $xfer += $output->writeString($iter604); + $xfer += $output->writeString($iter611); } } } @@ -19192,7 +20345,7 @@ class CreationMetadata { $xfer += $output->writeFieldEnd(); } if ($this->validTxnList !== null) { - $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); $xfer += $output->writeString($this->validTxnList); $xfer += $output->writeFieldEnd(); } @@ -19332,6 +20485,10 @@ class NotificationEvent { * @var string */ public $messageFormat = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19364,6 +20521,10 @@ class NotificationEvent { 'var' => 'messageFormat', 'type' => TType::STRING, ), + 8 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19388,6 +20549,9 @@ class NotificationEvent { if (isset($vals['messageFormat'])) { $this->messageFormat = $vals['messageFormat']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -19459,6 +20623,13 @@ class NotificationEvent { $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19507,6 +20678,11 @@ class NotificationEvent { $xfer += $output->writeString($this->messageFormat); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 8); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19565,15 +20741,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size605 = 0; - $_etype608 = 0; - $xfer += $input->readListBegin($_etype608, $_size605); - for ($_i609 = 0; $_i609 < $_size605; ++$_i609) + $_size612 = 0; + $_etype615 = 0; + $xfer += $input->readListBegin($_etype615, $_size612); + for ($_i616 = 0; $_i616 < $_size612; ++$_i616) { - $elem610 = null; - $elem610 = new \metastore\NotificationEvent(); - $xfer += $elem610->read($input); - $this->events []= $elem610; + $elem617 = null; + $elem617 = new \metastore\NotificationEvent(); + $xfer += $elem617->read($input); + $this->events []= $elem617; } $xfer += $input->readListEnd(); } else { @@ -19601,9 +20777,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter611) + foreach ($this->events as $iter618) { - $xfer += $iter611->write($output); + $xfer += $iter618->write($output); } } $output->writeListEnd(); @@ -19703,6 +20879,10 @@ class NotificationEventsCountRequest { * @var string */ public $dbName = null; + /** + * @var string + */ + public $catName = null; public 
function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19715,6 +20895,10 @@ class NotificationEventsCountRequest { 'var' => 'dbName', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19724,6 +20908,9 @@ class NotificationEventsCountRequest { if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -19760,6 +20947,13 @@ class NotificationEventsCountRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19783,6 +20977,11 @@ class NotificationEventsCountRequest { $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19948,14 +21147,14 @@ class InsertEventRequestData { case 2: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size612 = 0; - $_etype615 = 0; - $xfer += $input->readListBegin($_etype615, $_size612); - for ($_i616 = 0; $_i616 < $_size612; ++$_i616) + $_size619 = 0; + $_etype622 = 0; + $xfer += $input->readListBegin($_etype622, $_size619); + for ($_i623 = 0; $_i623 < $_size619; ++$_i623) { - $elem617 = null; - $xfer += $input->readString($elem617); - $this->filesAdded []= $elem617; + $elem624 = null; + $xfer += $input->readString($elem624); + $this->filesAdded []= $elem624; } $xfer += $input->readListEnd(); } else { @@ -19965,14 +21164,14 @@ class InsertEventRequestData { case 3: if ($ftype == TType::LST) { $this->filesAddedChecksum = array(); - $_size618 = 0; - $_etype621 = 0; - $xfer += $input->readListBegin($_etype621, $_size618); - for ($_i622 = 0; $_i622 < $_size618; ++$_i622) + $_size625 = 0; + $_etype628 = 0; + $xfer += $input->readListBegin($_etype628, $_size625); + for ($_i629 = 0; $_i629 < $_size625; ++$_i629) { - $elem623 = null; - $xfer += $input->readString($elem623); - $this->filesAddedChecksum []= $elem623; + $elem630 = null; + $xfer += $input->readString($elem630); + $this->filesAddedChecksum []= $elem630; } $xfer += $input->readListEnd(); } else { @@ -20005,9 +21204,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter624) + foreach ($this->filesAdded as $iter631) { - $xfer += $output->writeString($iter624); + $xfer += $output->writeString($iter631); } } $output->writeListEnd(); @@ -20022,9 +21221,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAddedChecksum)); { - foreach ($this->filesAddedChecksum as $iter625) + foreach ($this->filesAddedChecksum as $iter632) { - $xfer += $output->writeString($iter625); + $xfer += $output->writeString($iter632); } } $output->writeListEnd(); @@ -20141,6 +21340,10 @@ class FireEventRequest { * @var string[] */ public $partitionVals = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -20170,6 +21373,10 @@ class FireEventRequest { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if 
(is_array($vals)) { @@ -20188,6 +21395,9 @@ class FireEventRequest { if (isset($vals['partitionVals'])) { $this->partitionVals = $vals['partitionVals']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -20242,20 +21452,27 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size626 = 0; - $_etype629 = 0; - $xfer += $input->readListBegin($_etype629, $_size626); - for ($_i630 = 0; $_i630 < $_size626; ++$_i630) + $_size633 = 0; + $_etype636 = 0; + $xfer += $input->readListBegin($_etype636, $_size633); + for ($_i637 = 0; $_i637 < $_size633; ++$_i637) { - $elem631 = null; - $xfer += $input->readString($elem631); - $this->partitionVals []= $elem631; + $elem638 = null; + $xfer += $input->readString($elem638); + $this->partitionVals []= $elem638; } $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -20300,15 +21517,20 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter632) + foreach ($this->partitionVals as $iter639) { - $xfer += $output->writeString($iter632); + $xfer += $output->writeString($iter639); } } $output->writeListEnd(); } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -20530,18 +21752,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size633 = 0; - $_ktype634 = 0; - $_vtype635 = 0; - $xfer += $input->readMapBegin($_ktype634, $_vtype635, $_size633); - for ($_i637 = 0; $_i637 < $_size633; ++$_i637) + $_size640 = 0; + $_ktype641 = 0; + $_vtype642 = 0; + $xfer += $input->readMapBegin($_ktype641, $_vtype642, $_size640); + for ($_i644 = 0; $_i644 < $_size640; ++$_i644) { - $key638 = 0; - $val639 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key638); - $val639 = new \metastore\MetadataPpdResult(); - $xfer += $val639->read($input); - $this->metadata[$key638] = $val639; + $key645 = 0; + $val646 = new \metastore\MetadataPpdResult(); + $xfer += $input->readI64($key645); + $val646 = new \metastore\MetadataPpdResult(); + $xfer += $val646->read($input); + $this->metadata[$key645] = $val646; } $xfer += $input->readMapEnd(); } else { @@ -20576,10 +21798,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter640 => $viter641) + foreach ($this->metadata as $kiter647 => $viter648) { - $xfer += $output->writeI64($kiter640); - $xfer += $viter641->write($output); + $xfer += $output->writeI64($kiter647); + $xfer += $viter648->write($output); } } $output->writeMapEnd(); @@ -20681,14 +21903,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size642 = 0; - $_etype645 = 0; - $xfer += $input->readListBegin($_etype645, $_size642); - for ($_i646 = 0; $_i646 < $_size642; ++$_i646) + $_size649 = 0; + $_etype652 = 0; + $xfer += $input->readListBegin($_etype652, $_size649); + for ($_i653 = 0; $_i653 < $_size649; ++$_i653) { - 
$elem647 = null; - $xfer += $input->readI64($elem647); - $this->fileIds []= $elem647; + $elem654 = null; + $xfer += $input->readI64($elem654); + $this->fileIds []= $elem654; } $xfer += $input->readListEnd(); } else { @@ -20737,9 +21959,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter648) + foreach ($this->fileIds as $iter655) { - $xfer += $output->writeI64($iter648); + $xfer += $output->writeI64($iter655); } } $output->writeListEnd(); @@ -20833,17 +22055,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size649 = 0; - $_ktype650 = 0; - $_vtype651 = 0; - $xfer += $input->readMapBegin($_ktype650, $_vtype651, $_size649); - for ($_i653 = 0; $_i653 < $_size649; ++$_i653) + $_size656 = 0; + $_ktype657 = 0; + $_vtype658 = 0; + $xfer += $input->readMapBegin($_ktype657, $_vtype658, $_size656); + for ($_i660 = 0; $_i660 < $_size656; ++$_i660) { - $key654 = 0; - $val655 = ''; - $xfer += $input->readI64($key654); - $xfer += $input->readString($val655); - $this->metadata[$key654] = $val655; + $key661 = 0; + $val662 = ''; + $xfer += $input->readI64($key661); + $xfer += $input->readString($val662); + $this->metadata[$key661] = $val662; } $xfer += $input->readMapEnd(); } else { @@ -20878,10 +22100,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter656 => $viter657) + foreach ($this->metadata as $kiter663 => $viter664) { - $xfer += $output->writeI64($kiter656); - $xfer += $output->writeString($viter657); + $xfer += $output->writeI64($kiter663); + $xfer += $output->writeString($viter664); } } $output->writeMapEnd(); @@ -20950,14 +22172,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size658 = 0; - $_etype661 = 0; - $xfer += $input->readListBegin($_etype661, $_size658); - for ($_i662 = 0; $_i662 < $_size658; ++$_i662) + $_size665 = 0; + $_etype668 = 0; + $xfer += $input->readListBegin($_etype668, $_size665); + for ($_i669 = 0; $_i669 < $_size665; ++$_i669) { - $elem663 = null; - $xfer += $input->readI64($elem663); - $this->fileIds []= $elem663; + $elem670 = null; + $xfer += $input->readI64($elem670); + $this->fileIds []= $elem670; } $xfer += $input->readListEnd(); } else { @@ -20985,9 +22207,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter664) + foreach ($this->fileIds as $iter671) { - $xfer += $output->writeI64($iter664); + $xfer += $output->writeI64($iter671); } } $output->writeListEnd(); @@ -21127,14 +22349,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size665 = 0; - $_etype668 = 0; - $xfer += $input->readListBegin($_etype668, $_size665); - for ($_i669 = 0; $_i669 < $_size665; ++$_i669) + $_size672 = 0; + $_etype675 = 0; + $xfer += $input->readListBegin($_etype675, $_size672); + for ($_i676 = 0; $_i676 < $_size672; ++$_i676) { - $elem670 = null; - $xfer += $input->readI64($elem670); - $this->fileIds []= $elem670; + $elem677 = null; + $xfer += $input->readI64($elem677); + $this->fileIds []= $elem677; } $xfer += $input->readListEnd(); } else { @@ -21144,14 +22366,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size671 = 0; - $_etype674 = 0; - $xfer += $input->readListBegin($_etype674, $_size671); - for ($_i675 = 0; 
$_i675 < $_size671; ++$_i675) + $_size678 = 0; + $_etype681 = 0; + $xfer += $input->readListBegin($_etype681, $_size678); + for ($_i682 = 0; $_i682 < $_size678; ++$_i682) { - $elem676 = null; - $xfer += $input->readString($elem676); - $this->metadata []= $elem676; + $elem683 = null; + $xfer += $input->readString($elem683); + $this->metadata []= $elem683; } $xfer += $input->readListEnd(); } else { @@ -21186,9 +22408,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter677) + foreach ($this->fileIds as $iter684) { - $xfer += $output->writeI64($iter677); + $xfer += $output->writeI64($iter684); } } $output->writeListEnd(); @@ -21203,9 +22425,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter678) + foreach ($this->metadata as $iter685) { - $xfer += $output->writeString($iter678); + $xfer += $output->writeString($iter685); } } $output->writeListEnd(); @@ -21324,14 +22546,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size679 = 0; - $_etype682 = 0; - $xfer += $input->readListBegin($_etype682, $_size679); - for ($_i683 = 0; $_i683 < $_size679; ++$_i683) + $_size686 = 0; + $_etype689 = 0; + $xfer += $input->readListBegin($_etype689, $_size686); + for ($_i690 = 0; $_i690 < $_size686; ++$_i690) { - $elem684 = null; - $xfer += $input->readI64($elem684); - $this->fileIds []= $elem684; + $elem691 = null; + $xfer += $input->readI64($elem691); + $this->fileIds []= $elem691; } $xfer += $input->readListEnd(); } else { @@ -21359,9 +22581,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter685) + foreach ($this->fileIds as $iter692) { - $xfer += $output->writeI64($iter685); + $xfer += $output->writeI64($iter692); } } $output->writeListEnd(); @@ -21645,15 +22867,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size686 = 0; - $_etype689 = 0; - $xfer += $input->readListBegin($_etype689, $_size686); - for ($_i690 = 0; $_i690 < $_size686; ++$_i690) + $_size693 = 0; + $_etype696 = 0; + $xfer += $input->readListBegin($_etype696, $_size693); + for ($_i697 = 0; $_i697 < $_size693; ++$_i697) { - $elem691 = null; - $elem691 = new \metastore\Function(); - $xfer += $elem691->read($input); - $this->functions []= $elem691; + $elem698 = null; + $elem698 = new \metastore\Function(); + $xfer += $elem698->read($input); + $this->functions []= $elem698; } $xfer += $input->readListEnd(); } else { @@ -21681,9 +22903,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter692) + foreach ($this->functions as $iter699) { - $xfer += $iter692->write($output); + $xfer += $iter699->write($output); } } $output->writeListEnd(); @@ -21747,14 +22969,14 @@ class ClientCapabilities { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size693 = 0; - $_etype696 = 0; - $xfer += $input->readListBegin($_etype696, $_size693); - for ($_i697 = 0; $_i697 < $_size693; ++$_i697) + $_size700 = 0; + $_etype703 = 0; + $xfer += $input->readListBegin($_etype703, $_size700); + for ($_i704 = 0; $_i704 < $_size700; ++$_i704) { - $elem698 = null; - $xfer += $input->readI32($elem698); - $this->values []= $elem698; + $elem705 = null; + $xfer += $input->readI32($elem705); + $this->values []= $elem705; } 
$xfer += $input->readListEnd(); } else { @@ -21782,9 +23004,9 @@ class ClientCapabilities { { $output->writeListBegin(TType::I32, count($this->values)); { - foreach ($this->values as $iter699) + foreach ($this->values as $iter706) { - $xfer += $output->writeI32($iter699); + $xfer += $output->writeI32($iter706); } } $output->writeListEnd(); @@ -21813,6 +23035,10 @@ class GetTableRequest { * @var \metastore\ClientCapabilities */ public $capabilities = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -21830,6 +23056,10 @@ class GetTableRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ClientCapabilities', ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -21842,6 +23072,9 @@ class GetTableRequest { if (isset($vals['capabilities'])) { $this->capabilities = $vals['capabilities']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -21886,6 +23119,13 @@ class GetTableRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -21917,6 +23157,11 @@ class GetTableRequest { $xfer += $this->capabilities->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -22019,6 +23264,10 @@ class GetTablesRequest { * @var \metastore\ClientCapabilities */ public $capabilities = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -22040,6 +23289,10 @@ class GetTablesRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ClientCapabilities', ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -22052,6 +23305,9 @@ class GetTablesRequest { if (isset($vals['capabilities'])) { $this->capabilities = $vals['capabilities']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -22084,14 +23340,14 @@ class GetTablesRequest { case 2: if ($ftype == TType::LST) { $this->tblNames = array(); - $_size700 = 0; - $_etype703 = 0; - $xfer += $input->readListBegin($_etype703, $_size700); - for ($_i704 = 0; $_i704 < $_size700; ++$_i704) + $_size707 = 0; + $_etype710 = 0; + $xfer += $input->readListBegin($_etype710, $_size707); + for ($_i711 = 0; $_i711 < $_size707; ++$_i711) { - $elem705 = null; - $xfer += $input->readString($elem705); - $this->tblNames []= $elem705; + $elem712 = null; + $xfer += $input->readString($elem712); + $this->tblNames []= $elem712; } $xfer += $input->readListEnd(); } else { @@ -22106,6 +23362,13 @@ class GetTablesRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -22132,9 +23395,9 @@ class GetTablesRequest { { $output->writeListBegin(TType::STRING, count($this->tblNames)); { - foreach ($this->tblNames as $iter706) + foreach ($this->tblNames as $iter713) { - $xfer += $output->writeString($iter706); + $xfer += 
$output->writeString($iter713); } } $output->writeListEnd(); @@ -22149,6 +23412,11 @@ class GetTablesRequest { $xfer += $this->capabilities->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -22207,15 +23475,15 @@ class GetTablesResult { case 1: if ($ftype == TType::LST) { $this->tables = array(); - $_size707 = 0; - $_etype710 = 0; - $xfer += $input->readListBegin($_etype710, $_size707); - for ($_i711 = 0; $_i711 < $_size707; ++$_i711) + $_size714 = 0; + $_etype717 = 0; + $xfer += $input->readListBegin($_etype717, $_size714); + for ($_i718 = 0; $_i718 < $_size714; ++$_i718) { - $elem712 = null; - $elem712 = new \metastore\Table(); - $xfer += $elem712->read($input); - $this->tables []= $elem712; + $elem719 = null; + $elem719 = new \metastore\Table(); + $xfer += $elem719->read($input); + $this->tables []= $elem719; } $xfer += $input->readListEnd(); } else { @@ -22243,9 +23511,9 @@ class GetTablesResult { { $output->writeListBegin(TType::STRUCT, count($this->tables)); { - foreach ($this->tables as $iter713) + foreach ($this->tables as $iter720) { - $xfer += $iter713->write($output); + $xfer += $iter720->write($output); } } $output->writeListEnd(); @@ -22426,6 +23694,10 @@ class TableMeta { * @var string */ public $comments = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -22446,6 +23718,10 @@ class TableMeta { 'var' => 'comments', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -22461,6 +23737,9 @@ class TableMeta { if (isset($vals['comments'])) { $this->comments = $vals['comments']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -22511,6 +23790,13 @@ class TableMeta { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -22544,6 +23830,11 @@ class TableMeta { $xfer += $output->writeString($this->comments); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -22623,17 +23914,17 @@ class Materialization { case 1: if ($ftype == TType::SET) { $this->tablesUsed = array(); - $_size714 = 0; - $_etype717 = 0; - $xfer += $input->readSetBegin($_etype717, $_size714); - for ($_i718 = 0; $_i718 < $_size714; ++$_i718) + $_size721 = 0; + $_etype724 = 0; + $xfer += $input->readSetBegin($_etype724, $_size721); + for ($_i725 = 0; $_i725 < $_size721; ++$_i725) { - $elem719 = null; - $xfer += $input->readString($elem719); - if (is_scalar($elem719)) { - $this->tablesUsed[$elem719] = true; + $elem726 = null; + $xfer += $input->readString($elem726); + if (is_scalar($elem726)) { + $this->tablesUsed[$elem726] = true; } else { - $this->tablesUsed []= $elem719; + $this->tablesUsed []= $elem726; } } $xfer += $input->readSetEnd(); @@ -22676,12 +23967,12 @@ class Materialization { { 
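The GetTableRequest and GetTablesRequest hunks above add an optional catName string as field 4, so table lookups can be scoped to a catalog while old callers keep working. A minimal sketch against the regenerated Python client follows; the host, port, and the 'hive' default-catalog literal are illustrative assumptions, not part of this patch.

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import GetTableRequest, GetTablesRequest

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()

# catName is the new optional field 4; when unset, the server is expected
# to resolve the request against the default catalog ("hive", assumed).
req = GetTableRequest(dbName='default', tblName='web_logs', catName='hive')
table = client.get_table_req(req).table

# GetTablesRequest gained the same field for batched lookups.
batch = GetTablesRequest(dbName='default', tblNames=['t1', 't2'], catName='hive')
tables = client.get_table_objects_by_name_req(batch).tables

transport.close()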
$output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { - foreach ($this->tablesUsed as $iter720 => $iter721) + foreach ($this->tablesUsed as $iter727 => $iter728) { - if (is_scalar($iter721)) { - $xfer += $output->writeString($iter720); + if (is_scalar($iter728)) { + $xfer += $output->writeString($iter727); } else { - $xfer += $output->writeString($iter721); + $xfer += $output->writeString($iter728); } } } @@ -23948,15 +25239,15 @@ class WMFullResourcePlan { case 2: if ($ftype == TType::LST) { $this->pools = array(); - $_size722 = 0; - $_etype725 = 0; - $xfer += $input->readListBegin($_etype725, $_size722); - for ($_i726 = 0; $_i726 < $_size722; ++$_i726) + $_size729 = 0; + $_etype732 = 0; + $xfer += $input->readListBegin($_etype732, $_size729); + for ($_i733 = 0; $_i733 < $_size729; ++$_i733) { - $elem727 = null; - $elem727 = new \metastore\WMPool(); - $xfer += $elem727->read($input); - $this->pools []= $elem727; + $elem734 = null; + $elem734 = new \metastore\WMPool(); + $xfer += $elem734->read($input); + $this->pools []= $elem734; } $xfer += $input->readListEnd(); } else { @@ -23966,15 +25257,15 @@ class WMFullResourcePlan { case 3: if ($ftype == TType::LST) { $this->mappings = array(); - $_size728 = 0; - $_etype731 = 0; - $xfer += $input->readListBegin($_etype731, $_size728); - for ($_i732 = 0; $_i732 < $_size728; ++$_i732) + $_size735 = 0; + $_etype738 = 0; + $xfer += $input->readListBegin($_etype738, $_size735); + for ($_i739 = 0; $_i739 < $_size735; ++$_i739) { - $elem733 = null; - $elem733 = new \metastore\WMMapping(); - $xfer += $elem733->read($input); - $this->mappings []= $elem733; + $elem740 = null; + $elem740 = new \metastore\WMMapping(); + $xfer += $elem740->read($input); + $this->mappings []= $elem740; } $xfer += $input->readListEnd(); } else { @@ -23984,15 +25275,15 @@ class WMFullResourcePlan { case 4: if ($ftype == TType::LST) { $this->triggers = array(); - $_size734 = 0; - $_etype737 = 0; - $xfer += $input->readListBegin($_etype737, $_size734); - for ($_i738 = 0; $_i738 < $_size734; ++$_i738) + $_size741 = 0; + $_etype744 = 0; + $xfer += $input->readListBegin($_etype744, $_size741); + for ($_i745 = 0; $_i745 < $_size741; ++$_i745) { - $elem739 = null; - $elem739 = new \metastore\WMTrigger(); - $xfer += $elem739->read($input); - $this->triggers []= $elem739; + $elem746 = null; + $elem746 = new \metastore\WMTrigger(); + $xfer += $elem746->read($input); + $this->triggers []= $elem746; } $xfer += $input->readListEnd(); } else { @@ -24002,15 +25293,15 @@ class WMFullResourcePlan { case 5: if ($ftype == TType::LST) { $this->poolTriggers = array(); - $_size740 = 0; - $_etype743 = 0; - $xfer += $input->readListBegin($_etype743, $_size740); - for ($_i744 = 0; $_i744 < $_size740; ++$_i744) + $_size747 = 0; + $_etype750 = 0; + $xfer += $input->readListBegin($_etype750, $_size747); + for ($_i751 = 0; $_i751 < $_size747; ++$_i751) { - $elem745 = null; - $elem745 = new \metastore\WMPoolTrigger(); - $xfer += $elem745->read($input); - $this->poolTriggers []= $elem745; + $elem752 = null; + $elem752 = new \metastore\WMPoolTrigger(); + $xfer += $elem752->read($input); + $this->poolTriggers []= $elem752; } $xfer += $input->readListEnd(); } else { @@ -24046,9 +25337,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->pools)); { - foreach ($this->pools as $iter746) + foreach ($this->pools as $iter753) { - $xfer += $iter746->write($output); + $xfer += $iter753->write($output); } } $output->writeListEnd(); @@ -24063,9 +25354,9 @@ class 
WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->mappings)); { - foreach ($this->mappings as $iter747) + foreach ($this->mappings as $iter754) { - $xfer += $iter747->write($output); + $xfer += $iter754->write($output); } } $output->writeListEnd(); @@ -24080,9 +25371,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter748) + foreach ($this->triggers as $iter755) { - $xfer += $iter748->write($output); + $xfer += $iter755->write($output); } } $output->writeListEnd(); @@ -24097,9 +25388,9 @@ class WMFullResourcePlan { { $output->writeListBegin(TType::STRUCT, count($this->poolTriggers)); { - foreach ($this->poolTriggers as $iter749) + foreach ($this->poolTriggers as $iter756) { - $xfer += $iter749->write($output); + $xfer += $iter756->write($output); } } $output->writeListEnd(); @@ -24652,15 +25943,15 @@ class WMGetAllResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->resourcePlans = array(); - $_size750 = 0; - $_etype753 = 0; - $xfer += $input->readListBegin($_etype753, $_size750); - for ($_i754 = 0; $_i754 < $_size750; ++$_i754) + $_size757 = 0; + $_etype760 = 0; + $xfer += $input->readListBegin($_etype760, $_size757); + for ($_i761 = 0; $_i761 < $_size757; ++$_i761) { - $elem755 = null; - $elem755 = new \metastore\WMResourcePlan(); - $xfer += $elem755->read($input); - $this->resourcePlans []= $elem755; + $elem762 = null; + $elem762 = new \metastore\WMResourcePlan(); + $xfer += $elem762->read($input); + $this->resourcePlans []= $elem762; } $xfer += $input->readListEnd(); } else { @@ -24688,9 +25979,9 @@ class WMGetAllResourcePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->resourcePlans)); { - foreach ($this->resourcePlans as $iter756) + foreach ($this->resourcePlans as $iter763) { - $xfer += $iter756->write($output); + $xfer += $iter763->write($output); } } $output->writeListEnd(); @@ -25096,14 +26387,14 @@ class WMValidateResourcePlanResponse { case 1: if ($ftype == TType::LST) { $this->errors = array(); - $_size757 = 0; - $_etype760 = 0; - $xfer += $input->readListBegin($_etype760, $_size757); - for ($_i761 = 0; $_i761 < $_size757; ++$_i761) + $_size764 = 0; + $_etype767 = 0; + $xfer += $input->readListBegin($_etype767, $_size764); + for ($_i768 = 0; $_i768 < $_size764; ++$_i768) { - $elem762 = null; - $xfer += $input->readString($elem762); - $this->errors []= $elem762; + $elem769 = null; + $xfer += $input->readString($elem769); + $this->errors []= $elem769; } $xfer += $input->readListEnd(); } else { @@ -25113,14 +26404,14 @@ class WMValidateResourcePlanResponse { case 2: if ($ftype == TType::LST) { $this->warnings = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size770 = 0; + $_etype773 = 0; + $xfer += $input->readListBegin($_etype773, $_size770); + for ($_i774 = 0; $_i774 < $_size770; ++$_i774) { - $elem768 = null; - $xfer += $input->readString($elem768); - $this->warnings []= $elem768; + $elem775 = null; + $xfer += $input->readString($elem775); + $this->warnings []= $elem775; } $xfer += $input->readListEnd(); } else { @@ -25148,9 +26439,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->errors)); { - foreach ($this->errors as $iter769) + foreach ($this->errors as $iter776) { - $xfer += $output->writeString($iter769); + $xfer += $output->writeString($iter776); } } $output->writeListEnd(); @@ 
-25165,9 +26456,9 @@ class WMValidateResourcePlanResponse { { $output->writeListBegin(TType::STRING, count($this->warnings)); { - foreach ($this->warnings as $iter770) + foreach ($this->warnings as $iter777) { - $xfer += $output->writeString($iter770); + $xfer += $output->writeString($iter777); } } $output->writeListEnd(); @@ -25840,15 +27131,15 @@ class WMGetTriggersForResourePlanResponse { case 1: if ($ftype == TType::LST) { $this->triggers = array(); - $_size771 = 0; - $_etype774 = 0; - $xfer += $input->readListBegin($_etype774, $_size771); - for ($_i775 = 0; $_i775 < $_size771; ++$_i775) + $_size778 = 0; + $_etype781 = 0; + $xfer += $input->readListBegin($_etype781, $_size778); + for ($_i782 = 0; $_i782 < $_size778; ++$_i782) { - $elem776 = null; - $elem776 = new \metastore\WMTrigger(); - $xfer += $elem776->read($input); - $this->triggers []= $elem776; + $elem783 = null; + $elem783 = new \metastore\WMTrigger(); + $xfer += $elem783->read($input); + $this->triggers []= $elem783; } $xfer += $input->readListEnd(); } else { @@ -25876,9 +27167,9 @@ class WMGetTriggersForResourePlanResponse { { $output->writeListBegin(TType::STRUCT, count($this->triggers)); { - foreach ($this->triggers as $iter777) + foreach ($this->triggers as $iter784) { - $xfer += $iter777->write($output); + $xfer += $iter784->write($output); } } $output->writeListEnd(); @@ -26814,6 +28105,10 @@ class ISchema { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var int @@ -26848,26 +28143,30 @@ class ISchema { 'type' => TType::STRING, ), 3 => array( - 'var' => 'dbName', + 'var' => 'catName', 'type' => TType::STRING, ), 4 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 5 => array( 'var' => 'compatibility', 'type' => TType::I32, ), - 5 => array( + 6 => array( 'var' => 'validationLevel', 'type' => TType::I32, ), - 6 => array( + 7 => array( 'var' => 'canEvolve', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'schemaGroup', 'type' => TType::STRING, ), - 8 => array( + 9 => array( 'var' => 'description', 'type' => TType::STRING, ), @@ -26880,6 +28179,9 @@ class ISchema { if (isset($vals['name'])) { $this->name = $vals['name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -26936,40 +28238,47 @@ class ISchema { break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: if ($ftype == TType::I32) { $xfer += $input->readI32($this->compatibility); } else { $xfer += $input->skip($ftype); } break; - case 5: + case 6: if ($ftype == TType::I32) { $xfer += $input->readI32($this->validationLevel); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->canEvolve); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::STRING) { $xfer += $input->readString($this->schemaGroup); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::STRING) { $xfer += $input->readString($this->description); } else { @@ -26999,33 +28308,38 @@ class ISchema { $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } + if ($this->catName 
!== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 3); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 4); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->compatibility !== null) { - $xfer += $output->writeFieldBegin('compatibility', TType::I32, 4); + $xfer += $output->writeFieldBegin('compatibility', TType::I32, 5); $xfer += $output->writeI32($this->compatibility); $xfer += $output->writeFieldEnd(); } if ($this->validationLevel !== null) { - $xfer += $output->writeFieldBegin('validationLevel', TType::I32, 5); + $xfer += $output->writeFieldBegin('validationLevel', TType::I32, 6); $xfer += $output->writeI32($this->validationLevel); $xfer += $output->writeFieldEnd(); } if ($this->canEvolve !== null) { - $xfer += $output->writeFieldBegin('canEvolve', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('canEvolve', TType::BOOL, 7); $xfer += $output->writeBool($this->canEvolve); $xfer += $output->writeFieldEnd(); } if ($this->schemaGroup !== null) { - $xfer += $output->writeFieldBegin('schemaGroup', TType::STRING, 7); + $xfer += $output->writeFieldBegin('schemaGroup', TType::STRING, 8); $xfer += $output->writeString($this->schemaGroup); $xfer += $output->writeFieldEnd(); } if ($this->description !== null) { - $xfer += $output->writeFieldBegin('description', TType::STRING, 8); + $xfer += $output->writeFieldBegin('description', TType::STRING, 9); $xfer += $output->writeString($this->description); $xfer += $output->writeFieldEnd(); } @@ -27042,6 +28356,10 @@ class ISchemaName { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var string @@ -27052,16 +28370,23 @@ class ISchemaName { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbName', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'schemaName', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -27092,13 +28417,20 @@ class ISchemaName { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->schemaName); } else { $xfer += $input->skip($ftype); @@ -27117,13 +28449,18 @@ class ISchemaName { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ISchemaName'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->schemaName !== null) { - $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 2); + $xfer += 
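Unlike GetTableRequest, where catName was appended as a new highest field id, here it is spliced into the middle: catName becomes field 3 of ISchema and field 1 of ISchemaName, and dbName plus every later field shifts up by one. Thrift identifies fields by numeric id on the wire, so pre- and post-patch bindings disagree about these structs; the renumbering is presumably acceptable only because the schema-registry types had not yet shipped in a release (an inference, not stated in the patch). Constructing them from Python after regeneration:

from hive_metastore.ttypes import ISchema, ISchemaName

# dbName moved from field id 3 to 4; catName now occupies id 3.
schema = ISchema(name='events_v1', catName='hive', dbName='default')
key = ISchemaName(catName='hive', dbName='default', schemaName='events_v1')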
$output->writeFieldBegin('schemaName', TType::STRING, 3); $xfer += $output->writeString($this->schemaName); $xfer += $output->writeFieldEnd(); } @@ -27416,15 +28753,15 @@ class SchemaVersion { case 4: if ($ftype == TType::LST) { $this->cols = array(); - $_size778 = 0; - $_etype781 = 0; - $xfer += $input->readListBegin($_etype781, $_size778); - for ($_i782 = 0; $_i782 < $_size778; ++$_i782) + $_size785 = 0; + $_etype788 = 0; + $xfer += $input->readListBegin($_etype788, $_size785); + for ($_i789 = 0; $_i789 < $_size785; ++$_i789) { - $elem783 = null; - $elem783 = new \metastore\FieldSchema(); - $xfer += $elem783->read($input); - $this->cols []= $elem783; + $elem790 = null; + $elem790 = new \metastore\FieldSchema(); + $xfer += $elem790->read($input); + $this->cols []= $elem790; } $xfer += $input->readListEnd(); } else { @@ -27513,9 +28850,9 @@ class SchemaVersion { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter784) + foreach ($this->cols as $iter791) { - $xfer += $iter784->write($output); + $xfer += $iter791->write($output); } } $output->writeListEnd(); @@ -27837,15 +29174,15 @@ class FindSchemasByColsResp { case 1: if ($ftype == TType::LST) { $this->schemaVersions = array(); - $_size785 = 0; - $_etype788 = 0; - $xfer += $input->readListBegin($_etype788, $_size785); - for ($_i789 = 0; $_i789 < $_size785; ++$_i789) + $_size792 = 0; + $_etype795 = 0; + $xfer += $input->readListBegin($_etype795, $_size792); + for ($_i796 = 0; $_i796 < $_size792; ++$_i796) { - $elem790 = null; - $elem790 = new \metastore\SchemaVersionDescriptor(); - $xfer += $elem790->read($input); - $this->schemaVersions []= $elem790; + $elem797 = null; + $elem797 = new \metastore\SchemaVersionDescriptor(); + $xfer += $elem797->read($input); + $this->schemaVersions []= $elem797; } $xfer += $input->readListEnd(); } else { @@ -27873,9 +29210,9 @@ class FindSchemasByColsResp { { $output->writeListBegin(TType::STRUCT, count($this->schemaVersions)); { - foreach ($this->schemaVersions as $iter791) + foreach ($this->schemaVersions as $iter798) { - $xfer += $iter791->write($output); + $xfer += $iter798->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 1b3ebcf6ef..d39690f31c 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -26,6 +26,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print('Functions:') print(' string getMetaConf(string key)') print(' void setMetaConf(string key, string value)') + print(' void create_catalog(CreateCatalogRequest catalog)') + print(' GetCatalogResponse get_catalog(GetCatalogRequest catName)') + print(' GetCatalogsResponse get_catalogs()') + print(' void drop_catalog(DropCatalogRequest catName)') print(' void create_database(Database database)') print(' Database get_database(string name)') print(' void drop_database(string name, bool deleteData, bool cascade)') @@ -63,7 +67,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' GetTableResult get_table_req(GetTableRequest req)') print(' GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)') print(' get_materialization_invalidation_info(string dbname, tbl_names)') - print(' void update_creation_metadata(string dbname, string tbl_name, CreationMetadata 
creation_metadata)') + print(' void update_creation_metadata(string catName, string dbname, string tbl_name, CreationMetadata creation_metadata)') print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') @@ -301,6 +305,30 @@ elif cmd == 'setMetaConf': sys.exit(1) pp.pprint(client.setMetaConf(args[0],args[1],)) +elif cmd == 'create_catalog': + if len(args) != 1: + print('create_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.create_catalog(eval(args[0]),)) + +elif cmd == 'get_catalog': + if len(args) != 1: + print('get_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.get_catalog(eval(args[0]),)) + +elif cmd == 'get_catalogs': + if len(args) != 0: + print('get_catalogs requires 0 args') + sys.exit(1) + pp.pprint(client.get_catalogs()) + +elif cmd == 'drop_catalog': + if len(args) != 1: + print('drop_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.drop_catalog(eval(args[0]),)) + elif cmd == 'create_database': if len(args) != 1: print('create_database requires 1 args') @@ -524,10 +552,10 @@ elif cmd == 'get_materialization_invalidation_info': pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),)) elif cmd == 'update_creation_metadata': - if len(args) != 3: - print('update_creation_metadata requires 3 args') + if len(args) != 4: + print('update_creation_metadata requires 4 args') sys.exit(1) - pp.pprint(client.update_creation_metadata(args[0],args[1],eval(args[2]),)) + pp.pprint(client.update_creation_metadata(args[0],args[1],args[2],eval(args[3]),)) elif cmd == 'get_table_names_by_filter': if len(args) != 3: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index cf36654b51..bded16a54e 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -38,6 +38,30 @@ def setMetaConf(self, key, value): """ pass + def create_catalog(self, catalog): + """ + Parameters: + - catalog + """ + pass + + def get_catalog(self, catName): + """ + Parameters: + - catName + """ + pass + + def get_catalogs(self): + pass + + def drop_catalog(self, catName): + """ + Parameters: + - catName + """ + pass + def create_database(self, database): """ Parameters: @@ -324,9 +348,10 @@ def get_materialization_invalidation_info(self, dbname, tbl_names): """ pass - def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): """ Parameters: + - catName - dbname - tbl_name - creation_metadata @@ -1597,6 +1622,139 @@ def recv_setMetaConf(self): raise result.o1 return + def create_catalog(self, catalog): + """ + Parameters: + - catalog + """ + self.send_create_catalog(catalog) + self.recv_create_catalog() + + def send_create_catalog(self, catalog): + self._oprot.writeMessageBegin('create_catalog', TMessageType.CALL, self._seqid) + args = create_catalog_args() + args.catalog = catalog + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == 
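The command-line helper picks up the four catalog calls, and update_creation_metadata now requires four arguments with the catalog name first. Struct-typed arguments are passed as Python expressions that the script evals. Illustrative invocations; the host, port, and the request-struct field names are assumptions to verify against the regenerated ttypes:

ThriftHiveMetastore-remote -h localhost:9083 get_catalogs
ThriftHiveMetastore-remote -h localhost:9083 get_catalog "GetCatalogRequest(name='hive')"
ThriftHiveMetastore-remote -h localhost:9083 drop_catalog "DropCatalogRequest(name='scratch')"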
TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_catalog(self, catName): + """ + Parameters: + - catName + """ + self.send_get_catalog(catName) + return self.recv_get_catalog() + + def send_get_catalog(self, catName): + self._oprot.writeMessageBegin('get_catalog', TMessageType.CALL, self._seqid) + args = get_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result") + + def get_catalogs(self): + self.send_get_catalogs() + return self.recv_get_catalogs() + + def send_get_catalogs(self): + self._oprot.writeMessageBegin('get_catalogs', TMessageType.CALL, self._seqid) + args = get_catalogs_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalogs(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalogs_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result") + + def drop_catalog(self, catName): + """ + Parameters: + - catName + """ + self.send_drop_catalog(catName) + self.recv_drop_catalog() + + def send_drop_catalog(self, catName): + self._oprot.writeMessageBegin('drop_catalog', TMessageType.CALL, self._seqid) + args = drop_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + def create_database(self, database): """ Parameters: @@ -2925,19 +3083,21 @@ def recv_get_materialization_invalidation_info(self): raise result.o3 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result") - def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): """ Parameters: + - catName - dbname - tbl_name - creation_metadata """ - self.send_update_creation_metadata(dbname, tbl_name, 
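Client-side, each new call is the standard send_/recv_ pair: create_catalog and drop_catalog return nothing on success, while the getters unwrap result.success. A round-trip sketch reusing the client from the earlier example; the Catalog and response field names (name, description, locationUri, catalog, names) are read off this patch's IDL and should be treated as assumptions:

from hive_metastore.ttypes import (Catalog, CreateCatalogRequest,
                                   GetCatalogRequest, DropCatalogRequest)

# Catalog's fields are assumed: name, optional description, locationUri.
cat = Catalog(name='etl', description='pipeline staging',
              locationUri='hdfs://nn:8020/warehouse/etl')
client.create_catalog(CreateCatalogRequest(catalog=cat))

# get_catalog wraps the name in a request struct and returns a response struct.
resp = client.get_catalog(GetCatalogRequest(name='etl'))
print(resp.catalog.locationUri)

# get_catalogs takes no arguments; the response is assumed to list catalog names.
print(client.get_catalogs().names)

# update_creation_metadata now threads the catalog name through first:
# client.update_creation_metadata('hive', 'default', 'mv_sales', creation_metadata)

client.drop_catalog(DropCatalogRequest(name='etl'))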
creation_metadata) + self.send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) self.recv_update_creation_metadata() - def send_update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def send_update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): self._oprot.writeMessageBegin('update_creation_metadata', TMessageType.CALL, self._seqid) args = update_creation_metadata_args() + args.catName = catName args.dbname = dbname args.tbl_name = tbl_name args.creation_metadata = creation_metadata @@ -8557,6 +8717,10 @@ def __init__(self, handler): fb303.FacebookService.Processor.__init__(self, handler) self._processMap["getMetaConf"] = Processor.process_getMetaConf self._processMap["setMetaConf"] = Processor.process_setMetaConf + self._processMap["create_catalog"] = Processor.process_create_catalog + self._processMap["get_catalog"] = Processor.process_get_catalog + self._processMap["get_catalogs"] = Processor.process_get_catalogs + self._processMap["drop_catalog"] = Processor.process_drop_catalog self._processMap["create_database"] = Processor.process_create_database self._processMap["get_database"] = Processor.process_get_database self._processMap["drop_database"] = Processor.process_drop_database @@ -8811,6 +8975,109 @@ def process_setMetaConf(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_create_catalog(self, seqid, iprot, oprot): + args = create_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_catalog_result() + try: + self._handler.create_catalog(args.catalog) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("create_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalog(self, seqid, iprot, oprot): + args = get_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalog_result() + try: + result.success = self._handler.get_catalog(args.catName) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalogs(self, seqid, iprot, oprot): + args = get_catalogs_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalogs_result() + try: + result.success = self._handler.get_catalogs() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = 
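On the server, process_create_catalog and its siblings fix the failure contract: exceptions declared in the IDL (AlreadyExistsException, NoSuchObjectException, MetaException, and so on) are caught, stored in the result struct, and sent back as a normal REPLY, while anything else is logged and collapsed into a generic TApplicationException INTERNAL_ERROR. A handler therefore reports expected failures by raising the declared ttypes exceptions, as in this hypothetical fragment (the storage backend is invented for illustration):

from hive_metastore.ttypes import (AlreadyExistsException,
                                   NoSuchObjectException, GetCatalogResponse)

class CatalogHandler(object):
    def __init__(self, store):
        self.store = store  # hypothetical persistence layer, not part of the patch

    def create_catalog(self, rqst):
        # Raising the declared exception lands in result.o1 and reaches the
        # client as AlreadyExistsException rather than INTERNAL_ERROR.
        if self.store.exists(rqst.catalog.name):
            raise AlreadyExistsException(message='catalog exists: ' + rqst.catalog.name)
        self.store.put(rqst.catalog)

    def get_catalog(self, rqst):
        cat = self.store.get(rqst.name)
        if cat is None:
            raise NoSuchObjectException(message='no such catalog: ' + rqst.name)
        return GetCatalogResponse(catalog=cat)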
TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_catalogs", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_catalog(self, seqid, iprot, oprot): + args = drop_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_catalog_result() + try: + self._handler.drop_catalog(args.catName) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("drop_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_create_database(self, seqid, iprot, oprot): args = create_database_args() args.read(iprot) @@ -9754,7 +10021,7 @@ def process_update_creation_metadata(self, seqid, iprot, oprot): iprot.readMessageEnd() result = update_creation_metadata_result() try: - self._handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + self._handler.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -13912,19 +14179,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_database_args: +class create_catalog_args: """ Attributes: - - database + - catalog """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'catalog', (CreateCatalogRequest, CreateCatalogRequest.thrift_spec), None, ), # 1 ) - def __init__(self, database=None,): - self.database = database + def __init__(self, catalog=None,): + self.catalog = catalog def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13937,8 +14204,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.database = Database() - self.database.read(iprot) + self.catalog = CreateCatalogRequest() + self.catalog.read(iprot) else: iprot.skip(ftype) else: @@ -13950,10 +14217,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_database_args') - if self.database is not None: - oprot.writeFieldBegin('database', TType.STRUCT, 1) - self.database.write(oprot) + oprot.writeStructBegin('create_catalog_args') + if self.catalog is not None: + oprot.writeFieldBegin('catalog', TType.STRUCT, 1) + self.catalog.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13964,7 +14231,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.database) + value = (value * 31) ^ hash(self.catalog) return 
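From create_catalog_args onward the diff reads as if the existing database classes were renamed: create_database_args appears to become create_catalog_args, get_database_args to become get_catalog_args, and so on. That is an artifact of diff alignment, not a rename. The new catalog args/result classes are inserted ahead of the database ones, the diff pairs old and new class bodies positionally, and the original database classes re-emerge unchanged a few hunks later, each displaced by the new class positions. Only the catalog classes are genuinely new.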
value def __repr__(self): @@ -13978,7 +14245,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_database_result: +class create_catalog_result: """ Attributes: - o1 @@ -14034,7 +14301,451 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_database_result') + oprot.writeStructBegin('create_catalog_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalog_args: + """ + Attributes: + - catName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'catName', (GetCatalogRequest, GetCatalogRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, catName=None,): + self.catName = catName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = GetCatalogRequest() + self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalog_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalog_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetCatalogResponse, GetCatalogResponse.thrift_spec), None, ), # 
0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetCatalogResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalog_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalogs_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalogs_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return 
'%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalogs_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetCatalogsResponse, GetCatalogsResponse.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetCatalogsResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalogs_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_catalog_args: + """ + Attributes: + - catName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'catName', (DropCatalogRequest, DropCatalogRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, catName=None,): + self.catName = catName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = DropCatalogRequest() + self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_catalog_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_catalog_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_catalog_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -14072,19 +14783,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_database_args: +class create_database_args: """ Attributes: - - name + - database """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'name', None, None, ), # 1 + (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 ) - def __init__(self, name=None,): - self.name = name + def __init__(self, database=None,): + self.database = database def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14096,8 +14807,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.name = iprot.readString() + if ftype == TType.STRUCT: + self.database = Database() + self.database.read(iprot) else: iprot.skip(ftype) else: @@ -14109,10 +14821,10 @@ def write(self, 
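drop_catalog_result declares three recoverable failures, so callers can discriminate instead of pattern-matching on a generic TApplicationException. What actually triggers InvalidOperationException is server policy outside this generated file; refusing to drop a non-empty or built-in catalog is a plausible reading, flagged here as an assumption:

from hive_metastore.ttypes import (DropCatalogRequest, NoSuchObjectException,
                                   InvalidOperationException, MetaException)

try:
    client.drop_catalog(DropCatalogRequest(name='scratch'))
except NoSuchObjectException:
    pass  # already gone; acceptable for idempotent cleanup
except InvalidOperationException as e:
    print('drop refused: %s' % e.message)  # e.g. catalog still has databases (assumed)
except MetaException:
    raise  # backend failure; let it surface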
oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_database_args') - if self.name is not None: - oprot.writeFieldBegin('name', TType.STRING, 1) - oprot.writeString(self.name) + oprot.writeStructBegin('create_database_args') + if self.database is not None: + oprot.writeFieldBegin('database', TType.STRUCT, 1) + self.database.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14123,7 +14835,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.database) return value def __repr__(self): @@ -14137,24 +14849,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_database_result: +class create_database_result: """ Attributes: - - success - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (Database, Database.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + None, # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14165,24 +14878,24 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRUCT: - self.success = Database() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchObjectException() + self.o1 = AlreadyExistsException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14192,11 +14905,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_database_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('create_database_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -14205,6 +14914,10 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14214,9 +14927,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14230,25 +14943,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_database_args: +class get_database_args: """ Attributes: - name - - deleteData - - cascade """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.BOOL, 'deleteData', None, None, ), # 2 - (3, TType.BOOL, 'cascade', None, None, ), # 3 ) - def __init__(self, name=None, deleteData=None, cascade=None,): + def __init__(self, name=None,): self.name = name - self.deleteData = deleteData - self.cascade = cascade def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14264,16 +14971,6 @@ def read(self, iprot): self.name = iprot.readString() else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.BOOL: - self.deleteData = iprot.readBool() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.BOOL: - self.cascade = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14283,19 +14980,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_database_args') + oprot.writeStructBegin('get_database_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) oprot.writeFieldEnd() - if self.deleteData is not None: - oprot.writeFieldBegin('deleteData', TType.BOOL, 2) - oprot.writeBool(self.deleteData) - oprot.writeFieldEnd() - if self.cascade is not None: - oprot.writeFieldBegin('cascade', TType.BOOL, 3) - oprot.writeBool(self.cascade) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14306,8 +14995,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.name) - value = (value * 31) ^ hash(self.deleteData) - value = (value * 31) ^ hash(self.cascade) return value def __repr__(self): @@ -14321,25 +15008,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_database_result: +class get_database_result: """ Attributes: + - success - o1 - o2 - - o3 """ thrift_spec = ( - None, # 0 + (0, TType.STRUCT, 'success', (Database, Database.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None, o2=None, o3=None,): + def __init__(self, success=None, o1=None, o2=None,): + self.success = success self.o1 = o1 self.o2 = o2 - self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and 
fastbinary is not None: @@ -14350,7 +15036,13 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: + if ftype == TType.STRUCT: + self.success = Database() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: if ftype == TType.STRUCT: self.o1 = NoSuchObjectException() self.o1.read(iprot) @@ -14358,16 +15050,10 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = InvalidOperationException() + self.o2 = MetaException() self.o2.read(iprot) else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = MetaException() - self.o3.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14377,7 +15063,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_database_result') + oprot.writeStructBegin('get_database_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -14386,10 +15076,6 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14399,9 +15085,9 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14415,19 +15101,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_databases_args: +class drop_database_args: """ Attributes: - - pattern + - name + - deleteData + - cascade """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'pattern', None, None, ), # 1 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.BOOL, 'deleteData', None, None, ), # 2 + (3, TType.BOOL, 'cascade', None, None, ), # 3 ) - def __init__(self, pattern=None,): - self.pattern = pattern + def __init__(self, name=None, deleteData=None, cascade=None,): + self.name = name + self.deleteData = deleteData + self.cascade = cascade def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14440,7 +15132,17 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.pattern = iprot.readString() + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.cascade = iprot.readBool() else: iprot.skip(ftype) else: @@ -14452,10 +15154,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_databases_args') - if self.pattern is not None: - 
oprot.writeFieldBegin('pattern', TType.STRING, 1) - oprot.writeString(self.pattern) + oprot.writeStructBegin('drop_database_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin('deleteData', TType.BOOL, 2) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin('cascade', TType.BOOL, 3) + oprot.writeBool(self.cascade) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14466,7 +15176,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.pattern) + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.deleteData) + value = (value * 31) ^ hash(self.cascade) return value def __repr__(self): @@ -14480,21 +15192,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_databases_result: +class drop_database_result: """ Attributes: - - success - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14505,20 +15221,22 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype791, _size788) = iprot.readListBegin() - for _i792 in xrange(_size788): - _elem793 = iprot.readString() - self.success.append(_elem793) - iprot.readListEnd() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 2: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -14530,18 +15248,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_databases_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter794 in self.success: - oprot.writeString(iter794) - oprot.writeListEnd() - oprot.writeFieldEnd() + oprot.writeStructBegin('drop_database_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + 
oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14551,8 +15270,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14566,11 +15286,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_databases_args: +class get_databases_args: + """ + Attributes: + - pattern + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'pattern', None, None, ), # 1 ) + def __init__(self, pattern=None,): + self.pattern = pattern + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -14580,6 +15309,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.pattern = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14589,7 +15323,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_databases_args') + oprot.writeStructBegin('get_databases_args') + if self.pattern is not None: + oprot.writeFieldBegin('pattern', TType.STRING, 1) + oprot.writeString(self.pattern) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14599,6 +15337,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.pattern) return value def __repr__(self): @@ -14612,7 +15351,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_databases_result: +class get_databases_result: """ Attributes: - success @@ -14662,7 +15401,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_databases_result') + oprot.writeStructBegin('get_databases_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) @@ -14698,6 +15437,138 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_all_databases_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and 
fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_databases_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_databases_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype805, _size802) = iprot.readListBegin() + for _i806 in xrange(_size802): + _elem807 = iprot.readString() + self.success.append(_elem807) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_databases_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter808 in self.success: + oprot.writeString(iter808) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class alter_database_args: """ Attributes: @@ -15437,12 +16308,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype803, _vtype804, _size802 ) = iprot.readMapBegin() - for _i806 in xrange(_size802): - _key807 = iprot.readString() - _val808 = Type() - _val808.read(iprot) - self.success[_key807] = _val808 + (_ktype810, _vtype811, _size809 ) = iprot.readMapBegin() + for _i813 in xrange(_size809): + _key814 = iprot.readString() + _val815 = 
Type() + _val815.read(iprot) + self.success[_key814] = _val815 iprot.readMapEnd() else: iprot.skip(ftype) @@ -15465,9 +16336,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter809,viter810 in self.success.items(): - oprot.writeString(kiter809) - viter810.write(oprot) + for kiter816,viter817 in self.success.items(): + oprot.writeString(kiter816) + viter817.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -15610,11 +16481,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype814, _size811) = iprot.readListBegin() - for _i815 in xrange(_size811): - _elem816 = FieldSchema() - _elem816.read(iprot) - self.success.append(_elem816) + (_etype821, _size818) = iprot.readListBegin() + for _i822 in xrange(_size818): + _elem823 = FieldSchema() + _elem823.read(iprot) + self.success.append(_elem823) iprot.readListEnd() else: iprot.skip(ftype) @@ -15649,8 +16520,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter817 in self.success: - iter817.write(oprot) + for iter824 in self.success: + iter824.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15817,11 +16688,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype821, _size818) = iprot.readListBegin() - for _i822 in xrange(_size818): - _elem823 = FieldSchema() - _elem823.read(iprot) - self.success.append(_elem823) + (_etype828, _size825) = iprot.readListBegin() + for _i829 in xrange(_size825): + _elem830 = FieldSchema() + _elem830.read(iprot) + self.success.append(_elem830) iprot.readListEnd() else: iprot.skip(ftype) @@ -15856,8 +16727,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter824 in self.success: - iter824.write(oprot) + for iter831 in self.success: + iter831.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16010,11 +16881,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype828, _size825) = iprot.readListBegin() - for _i829 in xrange(_size825): - _elem830 = FieldSchema() - _elem830.read(iprot) - self.success.append(_elem830) + (_etype835, _size832) = iprot.readListBegin() + for _i836 in xrange(_size832): + _elem837 = FieldSchema() + _elem837.read(iprot) + self.success.append(_elem837) iprot.readListEnd() else: iprot.skip(ftype) @@ -16049,8 +16920,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter831 in self.success: - iter831.write(oprot) + for iter838 in self.success: + iter838.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16217,11 +17088,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype835, _size832) = iprot.readListBegin() - for _i836 in xrange(_size832): - _elem837 = FieldSchema() - _elem837.read(iprot) - self.success.append(_elem837) + (_etype842, _size839) = iprot.readListBegin() + for _i843 in xrange(_size839): + _elem844 = FieldSchema() + _elem844.read(iprot) + self.success.append(_elem844) iprot.readListEnd() else: iprot.skip(ftype) @@ -16256,8 +17127,8 @@ def 
write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter838 in self.success: - iter838.write(oprot) + for iter845 in self.success: + iter845.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16710,66 +17581,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype842, _size839) = iprot.readListBegin() - for _i843 in xrange(_size839): - _elem844 = SQLPrimaryKey() - _elem844.read(iprot) - self.primaryKeys.append(_elem844) + (_etype849, _size846) = iprot.readListBegin() + for _i850 in xrange(_size846): + _elem851 = SQLPrimaryKey() + _elem851.read(iprot) + self.primaryKeys.append(_elem851) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype848, _size845) = iprot.readListBegin() - for _i849 in xrange(_size845): - _elem850 = SQLForeignKey() - _elem850.read(iprot) - self.foreignKeys.append(_elem850) + (_etype855, _size852) = iprot.readListBegin() + for _i856 in xrange(_size852): + _elem857 = SQLForeignKey() + _elem857.read(iprot) + self.foreignKeys.append(_elem857) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype854, _size851) = iprot.readListBegin() - for _i855 in xrange(_size851): - _elem856 = SQLUniqueConstraint() - _elem856.read(iprot) - self.uniqueConstraints.append(_elem856) + (_etype861, _size858) = iprot.readListBegin() + for _i862 in xrange(_size858): + _elem863 = SQLUniqueConstraint() + _elem863.read(iprot) + self.uniqueConstraints.append(_elem863) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype860, _size857) = iprot.readListBegin() - for _i861 in xrange(_size857): - _elem862 = SQLNotNullConstraint() - _elem862.read(iprot) - self.notNullConstraints.append(_elem862) + (_etype867, _size864) = iprot.readListBegin() + for _i868 in xrange(_size864): + _elem869 = SQLNotNullConstraint() + _elem869.read(iprot) + self.notNullConstraints.append(_elem869) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype866, _size863) = iprot.readListBegin() - for _i867 in xrange(_size863): - _elem868 = SQLDefaultConstraint() - _elem868.read(iprot) - self.defaultConstraints.append(_elem868) + (_etype873, _size870) = iprot.readListBegin() + for _i874 in xrange(_size870): + _elem875 = SQLDefaultConstraint() + _elem875.read(iprot) + self.defaultConstraints.append(_elem875) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype872, _size869) = iprot.readListBegin() - for _i873 in xrange(_size869): - _elem874 = SQLCheckConstraint() - _elem874.read(iprot) - self.checkConstraints.append(_elem874) + (_etype879, _size876) = iprot.readListBegin() + for _i880 in xrange(_size876): + _elem881 = SQLCheckConstraint() + _elem881.read(iprot) + self.checkConstraints.append(_elem881) iprot.readListEnd() else: iprot.skip(ftype) @@ -16790,43 +17661,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter875 in self.primaryKeys: - iter875.write(oprot) + for iter882 in self.primaryKeys: + iter882.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: 
oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter876 in self.foreignKeys: - iter876.write(oprot) + for iter883 in self.foreignKeys: + iter883.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter877 in self.uniqueConstraints: - iter877.write(oprot) + for iter884 in self.uniqueConstraints: + iter884.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter878 in self.notNullConstraints: - iter878.write(oprot) + for iter885 in self.notNullConstraints: + iter885.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter879 in self.defaultConstraints: - iter879.write(oprot) + for iter886 in self.defaultConstraints: + iter886.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter880 in self.checkConstraints: - iter880.write(oprot) + for iter887 in self.checkConstraints: + iter887.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18386,10 +19257,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype884, _size881) = iprot.readListBegin() - for _i885 in xrange(_size881): - _elem886 = iprot.readString() - self.partNames.append(_elem886) + (_etype891, _size888) = iprot.readListBegin() + for _i892 in xrange(_size888): + _elem893 = iprot.readString() + self.partNames.append(_elem893) iprot.readListEnd() else: iprot.skip(ftype) @@ -18414,8 +19285,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter887 in self.partNames: - oprot.writeString(iter887) + for iter894 in self.partNames: + oprot.writeString(iter894) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18615,10 +19486,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype891, _size888) = iprot.readListBegin() - for _i892 in xrange(_size888): - _elem893 = iprot.readString() - self.success.append(_elem893) + (_etype898, _size895) = iprot.readListBegin() + for _i899 in xrange(_size895): + _elem900 = iprot.readString() + self.success.append(_elem900) iprot.readListEnd() else: iprot.skip(ftype) @@ -18641,8 +19512,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter894 in self.success: - oprot.writeString(iter894) + for iter901 in self.success: + oprot.writeString(iter901) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18792,10 +19663,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype898, _size895) = iprot.readListBegin() - for _i899 in xrange(_size895): - _elem900 = iprot.readString() - self.success.append(_elem900) + (_etype905, _size902) = iprot.readListBegin() + for 
_i906 in xrange(_size902): + _elem907 = iprot.readString() + self.success.append(_elem907) iprot.readListEnd() else: iprot.skip(ftype) @@ -18818,8 +19689,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter901 in self.success: - oprot.writeString(iter901) + for iter908 in self.success: + oprot.writeString(iter908) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18943,10 +19814,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype905, _size902) = iprot.readListBegin() - for _i906 in xrange(_size902): - _elem907 = iprot.readString() - self.success.append(_elem907) + (_etype912, _size909) = iprot.readListBegin() + for _i913 in xrange(_size909): + _elem914 = iprot.readString() + self.success.append(_elem914) iprot.readListEnd() else: iprot.skip(ftype) @@ -18969,8 +19840,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter908 in self.success: - oprot.writeString(iter908) + for iter915 in self.success: + oprot.writeString(iter915) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19043,10 +19914,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype912, _size909) = iprot.readListBegin() - for _i913 in xrange(_size909): - _elem914 = iprot.readString() - self.tbl_types.append(_elem914) + (_etype919, _size916) = iprot.readListBegin() + for _i920 in xrange(_size916): + _elem921 = iprot.readString() + self.tbl_types.append(_elem921) iprot.readListEnd() else: iprot.skip(ftype) @@ -19071,8 +19942,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter915 in self.tbl_types: - oprot.writeString(iter915) + for iter922 in self.tbl_types: + oprot.writeString(iter922) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19128,11 +19999,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype919, _size916) = iprot.readListBegin() - for _i920 in xrange(_size916): - _elem921 = TableMeta() - _elem921.read(iprot) - self.success.append(_elem921) + (_etype926, _size923) = iprot.readListBegin() + for _i927 in xrange(_size923): + _elem928 = TableMeta() + _elem928.read(iprot) + self.success.append(_elem928) iprot.readListEnd() else: iprot.skip(ftype) @@ -19155,8 +20026,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter922 in self.success: - iter922.write(oprot) + for iter929 in self.success: + iter929.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19280,10 +20151,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype926, _size923) = iprot.readListBegin() - for _i927 in xrange(_size923): - _elem928 = iprot.readString() - self.success.append(_elem928) + (_etype933, _size930) = iprot.readListBegin() + for _i934 in xrange(_size930): + _elem935 = iprot.readString() + self.success.append(_elem935) iprot.readListEnd() else: iprot.skip(ftype) @@ -19306,8 +20177,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, 
len(self.success)) - for iter929 in self.success: - oprot.writeString(iter929) + for iter936 in self.success: + oprot.writeString(iter936) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19543,10 +20414,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype933, _size930) = iprot.readListBegin() - for _i934 in xrange(_size930): - _elem935 = iprot.readString() - self.tbl_names.append(_elem935) + (_etype940, _size937) = iprot.readListBegin() + for _i941 in xrange(_size937): + _elem942 = iprot.readString() + self.tbl_names.append(_elem942) iprot.readListEnd() else: iprot.skip(ftype) @@ -19567,8 +20438,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter936 in self.tbl_names: - oprot.writeString(iter936) + for iter943 in self.tbl_names: + oprot.writeString(iter943) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19620,11 +20491,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype940, _size937) = iprot.readListBegin() - for _i941 in xrange(_size937): - _elem942 = Table() - _elem942.read(iprot) - self.success.append(_elem942) + (_etype947, _size944) = iprot.readListBegin() + for _i948 in xrange(_size944): + _elem949 = Table() + _elem949.read(iprot) + self.success.append(_elem949) iprot.readListEnd() else: iprot.skip(ftype) @@ -19641,8 +20512,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter943 in self.success: - iter943.write(oprot) + for iter950 in self.success: + iter950.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20034,10 +20905,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype947, _size944) = iprot.readListBegin() - for _i948 in xrange(_size944): - _elem949 = iprot.readString() - self.tbl_names.append(_elem949) + (_etype954, _size951) = iprot.readListBegin() + for _i955 in xrange(_size951): + _elem956 = iprot.readString() + self.tbl_names.append(_elem956) iprot.readListEnd() else: iprot.skip(ftype) @@ -20058,8 +20929,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter950 in self.tbl_names: - oprot.writeString(iter950) + for iter957 in self.tbl_names: + oprot.writeString(iter957) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20120,12 +20991,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype952, _vtype953, _size951 ) = iprot.readMapBegin() - for _i955 in xrange(_size951): - _key956 = iprot.readString() - _val957 = Materialization() - _val957.read(iprot) - self.success[_key956] = _val957 + (_ktype959, _vtype960, _size958 ) = iprot.readMapBegin() + for _i962 in xrange(_size958): + _key963 = iprot.readString() + _val964 = Materialization() + _val964.read(iprot) + self.success[_key963] = _val964 iprot.readMapEnd() else: iprot.skip(ftype) @@ -20160,9 +21031,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter958,viter959 in self.success.items(): - oprot.writeString(kiter958) - viter959.write(oprot) + for kiter965,viter966 in 
self.success.items(): + oprot.writeString(kiter965) + viter966.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20206,6 +21077,7 @@ def __ne__(self, other): class update_creation_metadata_args: """ Attributes: + - catName - dbname - tbl_name - creation_metadata @@ -20213,12 +21085,14 @@ class update_creation_metadata_args: thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbname', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 - (3, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 3 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbname', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 + (4, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 4 ) - def __init__(self, dbname=None, tbl_name=None, creation_metadata=None,): + def __init__(self, catName=None, dbname=None, tbl_name=None, creation_metadata=None,): + self.catName = catName self.dbname = dbname self.tbl_name = tbl_name self.creation_metadata = creation_metadata @@ -20234,15 +21108,20 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbname = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.tbl_name = iprot.readString() + self.dbname = iprot.readString() else: iprot.skip(ftype) elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: if ftype == TType.STRUCT: self.creation_metadata = CreationMetadata() self.creation_metadata.read(iprot) @@ -20258,16 +21137,20 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('update_creation_metadata_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbname is not None: - oprot.writeFieldBegin('dbname', TType.STRING, 1) + oprot.writeFieldBegin('dbname', TType.STRING, 2) oprot.writeString(self.dbname) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() if self.creation_metadata is not None: - oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 3) + oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 4) self.creation_metadata.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20279,6 +21162,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.creation_metadata) @@ -20514,10 +21398,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype963, _size960) = iprot.readListBegin() - for _i964 in xrange(_size960): - _elem965 = iprot.readString() - self.success.append(_elem965) + (_etype970, _size967) = iprot.readListBegin() + for _i971 in xrange(_size967): + _elem972 = iprot.readString() + self.success.append(_elem972) iprot.readListEnd() else: iprot.skip(ftype) @@ -20552,8 +21436,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter966 in self.success: - 
oprot.writeString(iter966) + for iter973 in self.success: + oprot.writeString(iter973) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21523,11 +22407,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype970, _size967) = iprot.readListBegin() - for _i971 in xrange(_size967): - _elem972 = Partition() - _elem972.read(iprot) - self.new_parts.append(_elem972) + (_etype977, _size974) = iprot.readListBegin() + for _i978 in xrange(_size974): + _elem979 = Partition() + _elem979.read(iprot) + self.new_parts.append(_elem979) iprot.readListEnd() else: iprot.skip(ftype) @@ -21544,8 +22428,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter973 in self.new_parts: - iter973.write(oprot) + for iter980 in self.new_parts: + iter980.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21703,11 +22587,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype977, _size974) = iprot.readListBegin() - for _i978 in xrange(_size974): - _elem979 = PartitionSpec() - _elem979.read(iprot) - self.new_parts.append(_elem979) + (_etype984, _size981) = iprot.readListBegin() + for _i985 in xrange(_size981): + _elem986 = PartitionSpec() + _elem986.read(iprot) + self.new_parts.append(_elem986) iprot.readListEnd() else: iprot.skip(ftype) @@ -21724,8 +22608,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter980 in self.new_parts: - iter980.write(oprot) + for iter987 in self.new_parts: + iter987.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21899,10 +22783,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype984, _size981) = iprot.readListBegin() - for _i985 in xrange(_size981): - _elem986 = iprot.readString() - self.part_vals.append(_elem986) + (_etype991, _size988) = iprot.readListBegin() + for _i992 in xrange(_size988): + _elem993 = iprot.readString() + self.part_vals.append(_elem993) iprot.readListEnd() else: iprot.skip(ftype) @@ -21927,8 +22811,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter987 in self.part_vals: - oprot.writeString(iter987) + for iter994 in self.part_vals: + oprot.writeString(iter994) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22281,10 +23165,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype991, _size988) = iprot.readListBegin() - for _i992 in xrange(_size988): - _elem993 = iprot.readString() - self.part_vals.append(_elem993) + (_etype998, _size995) = iprot.readListBegin() + for _i999 in xrange(_size995): + _elem1000 = iprot.readString() + self.part_vals.append(_elem1000) iprot.readListEnd() else: iprot.skip(ftype) @@ -22315,8 +23199,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter994 in self.part_vals: - oprot.writeString(iter994) + for iter1001 in self.part_vals: + oprot.writeString(iter1001) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -22911,10 +23795,10 @@ def read(self, iprot): 
elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype998, _size995) = iprot.readListBegin() - for _i999 in xrange(_size995): - _elem1000 = iprot.readString() - self.part_vals.append(_elem1000) + (_etype1005, _size1002) = iprot.readListBegin() + for _i1006 in xrange(_size1002): + _elem1007 = iprot.readString() + self.part_vals.append(_elem1007) iprot.readListEnd() else: iprot.skip(ftype) @@ -22944,8 +23828,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1001 in self.part_vals: - oprot.writeString(iter1001) + for iter1008 in self.part_vals: + oprot.writeString(iter1008) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -23118,10 +24002,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1005, _size1002) = iprot.readListBegin() - for _i1006 in xrange(_size1002): - _elem1007 = iprot.readString() - self.part_vals.append(_elem1007) + (_etype1012, _size1009) = iprot.readListBegin() + for _i1013 in xrange(_size1009): + _elem1014 = iprot.readString() + self.part_vals.append(_elem1014) iprot.readListEnd() else: iprot.skip(ftype) @@ -23157,8 +24041,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1008 in self.part_vals: - oprot.writeString(iter1008) + for iter1015 in self.part_vals: + oprot.writeString(iter1015) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -23895,10 +24779,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1012, _size1009) = iprot.readListBegin() - for _i1013 in xrange(_size1009): - _elem1014 = iprot.readString() - self.part_vals.append(_elem1014) + (_etype1019, _size1016) = iprot.readListBegin() + for _i1020 in xrange(_size1016): + _elem1021 = iprot.readString() + self.part_vals.append(_elem1021) iprot.readListEnd() else: iprot.skip(ftype) @@ -23923,8 +24807,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1015 in self.part_vals: - oprot.writeString(iter1015) + for iter1022 in self.part_vals: + oprot.writeString(iter1022) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24083,11 +24967,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1017, _vtype1018, _size1016 ) = iprot.readMapBegin() - for _i1020 in xrange(_size1016): - _key1021 = iprot.readString() - _val1022 = iprot.readString() - self.partitionSpecs[_key1021] = _val1022 + (_ktype1024, _vtype1025, _size1023 ) = iprot.readMapBegin() + for _i1027 in xrange(_size1023): + _key1028 = iprot.readString() + _val1029 = iprot.readString() + self.partitionSpecs[_key1028] = _val1029 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24124,9 +25008,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1023,viter1024 in self.partitionSpecs.items(): - oprot.writeString(kiter1023) - oprot.writeString(viter1024) + for kiter1030,viter1031 in self.partitionSpecs.items(): + oprot.writeString(kiter1030) + oprot.writeString(viter1031) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is 
not None: @@ -24331,11 +25215,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1026, _vtype1027, _size1025 ) = iprot.readMapBegin() - for _i1029 in xrange(_size1025): - _key1030 = iprot.readString() - _val1031 = iprot.readString() - self.partitionSpecs[_key1030] = _val1031 + (_ktype1033, _vtype1034, _size1032 ) = iprot.readMapBegin() + for _i1036 in xrange(_size1032): + _key1037 = iprot.readString() + _val1038 = iprot.readString() + self.partitionSpecs[_key1037] = _val1038 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24372,9 +25256,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1032,viter1033 in self.partitionSpecs.items(): - oprot.writeString(kiter1032) - oprot.writeString(viter1033) + for kiter1039,viter1040 in self.partitionSpecs.items(): + oprot.writeString(kiter1039) + oprot.writeString(viter1040) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -24457,11 +25341,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1037, _size1034) = iprot.readListBegin() - for _i1038 in xrange(_size1034): - _elem1039 = Partition() - _elem1039.read(iprot) - self.success.append(_elem1039) + (_etype1044, _size1041) = iprot.readListBegin() + for _i1045 in xrange(_size1041): + _elem1046 = Partition() + _elem1046.read(iprot) + self.success.append(_elem1046) iprot.readListEnd() else: iprot.skip(ftype) @@ -24502,8 +25386,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1040 in self.success: - iter1040.write(oprot) + for iter1047 in self.success: + iter1047.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24597,10 +25481,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1044, _size1041) = iprot.readListBegin() - for _i1045 in xrange(_size1041): - _elem1046 = iprot.readString() - self.part_vals.append(_elem1046) + (_etype1051, _size1048) = iprot.readListBegin() + for _i1052 in xrange(_size1048): + _elem1053 = iprot.readString() + self.part_vals.append(_elem1053) iprot.readListEnd() else: iprot.skip(ftype) @@ -24612,10 +25496,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1050, _size1047) = iprot.readListBegin() - for _i1051 in xrange(_size1047): - _elem1052 = iprot.readString() - self.group_names.append(_elem1052) + (_etype1057, _size1054) = iprot.readListBegin() + for _i1058 in xrange(_size1054): + _elem1059 = iprot.readString() + self.group_names.append(_elem1059) iprot.readListEnd() else: iprot.skip(ftype) @@ -24640,8 +25524,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1053 in self.part_vals: - oprot.writeString(iter1053) + for iter1060 in self.part_vals: + oprot.writeString(iter1060) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -24651,8 +25535,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1054 in self.group_names: - oprot.writeString(iter1054) + for iter1061 in self.group_names: + 
oprot.writeString(iter1061) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25081,11 +25965,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1058, _size1055) = iprot.readListBegin() - for _i1059 in xrange(_size1055): - _elem1060 = Partition() - _elem1060.read(iprot) - self.success.append(_elem1060) + (_etype1065, _size1062) = iprot.readListBegin() + for _i1066 in xrange(_size1062): + _elem1067 = Partition() + _elem1067.read(iprot) + self.success.append(_elem1067) iprot.readListEnd() else: iprot.skip(ftype) @@ -25114,8 +25998,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1061 in self.success: - iter1061.write(oprot) + for iter1068 in self.success: + iter1068.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25209,10 +26093,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1065, _size1062) = iprot.readListBegin() - for _i1066 in xrange(_size1062): - _elem1067 = iprot.readString() - self.group_names.append(_elem1067) + (_etype1072, _size1069) = iprot.readListBegin() + for _i1073 in xrange(_size1069): + _elem1074 = iprot.readString() + self.group_names.append(_elem1074) iprot.readListEnd() else: iprot.skip(ftype) @@ -25245,8 +26129,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1068 in self.group_names: - oprot.writeString(iter1068) + for iter1075 in self.group_names: + oprot.writeString(iter1075) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25307,11 +26191,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1072, _size1069) = iprot.readListBegin() - for _i1073 in xrange(_size1069): - _elem1074 = Partition() - _elem1074.read(iprot) - self.success.append(_elem1074) + (_etype1079, _size1076) = iprot.readListBegin() + for _i1080 in xrange(_size1076): + _elem1081 = Partition() + _elem1081.read(iprot) + self.success.append(_elem1081) iprot.readListEnd() else: iprot.skip(ftype) @@ -25340,8 +26224,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1075 in self.success: - iter1075.write(oprot) + for iter1082 in self.success: + iter1082.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25499,11 +26383,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1079, _size1076) = iprot.readListBegin() - for _i1080 in xrange(_size1076): - _elem1081 = PartitionSpec() - _elem1081.read(iprot) - self.success.append(_elem1081) + (_etype1086, _size1083) = iprot.readListBegin() + for _i1087 in xrange(_size1083): + _elem1088 = PartitionSpec() + _elem1088.read(iprot) + self.success.append(_elem1088) iprot.readListEnd() else: iprot.skip(ftype) @@ -25532,8 +26416,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1082 in self.success: - iter1082.write(oprot) + for iter1089 in self.success: + iter1089.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25691,10 +26575,10 @@ def read(self, iprot): if fid == 0: if ftype == 
TType.LIST: self.success = [] - (_etype1086, _size1083) = iprot.readListBegin() - for _i1087 in xrange(_size1083): - _elem1088 = iprot.readString() - self.success.append(_elem1088) + (_etype1093, _size1090) = iprot.readListBegin() + for _i1094 in xrange(_size1090): + _elem1095 = iprot.readString() + self.success.append(_elem1095) iprot.readListEnd() else: iprot.skip(ftype) @@ -25723,8 +26607,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1089 in self.success: - oprot.writeString(iter1089) + for iter1096 in self.success: + oprot.writeString(iter1096) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25964,10 +26848,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1093, _size1090) = iprot.readListBegin() - for _i1094 in xrange(_size1090): - _elem1095 = iprot.readString() - self.part_vals.append(_elem1095) + (_etype1100, _size1097) = iprot.readListBegin() + for _i1101 in xrange(_size1097): + _elem1102 = iprot.readString() + self.part_vals.append(_elem1102) iprot.readListEnd() else: iprot.skip(ftype) @@ -25997,8 +26881,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1096 in self.part_vals: - oprot.writeString(iter1096) + for iter1103 in self.part_vals: + oprot.writeString(iter1103) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -26062,11 +26946,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1100, _size1097) = iprot.readListBegin() - for _i1101 in xrange(_size1097): - _elem1102 = Partition() - _elem1102.read(iprot) - self.success.append(_elem1102) + (_etype1107, _size1104) = iprot.readListBegin() + for _i1108 in xrange(_size1104): + _elem1109 = Partition() + _elem1109.read(iprot) + self.success.append(_elem1109) iprot.readListEnd() else: iprot.skip(ftype) @@ -26095,8 +26979,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1103 in self.success: - iter1103.write(oprot) + for iter1110 in self.success: + iter1110.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26183,10 +27067,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1107, _size1104) = iprot.readListBegin() - for _i1108 in xrange(_size1104): - _elem1109 = iprot.readString() - self.part_vals.append(_elem1109) + (_etype1114, _size1111) = iprot.readListBegin() + for _i1115 in xrange(_size1111): + _elem1116 = iprot.readString() + self.part_vals.append(_elem1116) iprot.readListEnd() else: iprot.skip(ftype) @@ -26203,10 +27087,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1113, _size1110) = iprot.readListBegin() - for _i1114 in xrange(_size1110): - _elem1115 = iprot.readString() - self.group_names.append(_elem1115) + (_etype1120, _size1117) = iprot.readListBegin() + for _i1121 in xrange(_size1117): + _elem1122 = iprot.readString() + self.group_names.append(_elem1122) iprot.readListEnd() else: iprot.skip(ftype) @@ -26231,8 +27115,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1116 
in self.part_vals: - oprot.writeString(iter1116) + for iter1123 in self.part_vals: + oprot.writeString(iter1123) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -26246,8 +27130,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1117 in self.group_names: - oprot.writeString(iter1117) + for iter1124 in self.group_names: + oprot.writeString(iter1124) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26309,11 +27193,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1121, _size1118) = iprot.readListBegin() - for _i1122 in xrange(_size1118): - _elem1123 = Partition() - _elem1123.read(iprot) - self.success.append(_elem1123) + (_etype1128, _size1125) = iprot.readListBegin() + for _i1129 in xrange(_size1125): + _elem1130 = Partition() + _elem1130.read(iprot) + self.success.append(_elem1130) iprot.readListEnd() else: iprot.skip(ftype) @@ -26342,8 +27226,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1124 in self.success: - iter1124.write(oprot) + for iter1131 in self.success: + iter1131.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26424,10 +27308,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1128, _size1125) = iprot.readListBegin() - for _i1129 in xrange(_size1125): - _elem1130 = iprot.readString() - self.part_vals.append(_elem1130) + (_etype1135, _size1132) = iprot.readListBegin() + for _i1136 in xrange(_size1132): + _elem1137 = iprot.readString() + self.part_vals.append(_elem1137) iprot.readListEnd() else: iprot.skip(ftype) @@ -26457,8 +27341,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1131 in self.part_vals: - oprot.writeString(iter1131) + for iter1138 in self.part_vals: + oprot.writeString(iter1138) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -26522,10 +27406,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1135, _size1132) = iprot.readListBegin() - for _i1136 in xrange(_size1132): - _elem1137 = iprot.readString() - self.success.append(_elem1137) + (_etype1142, _size1139) = iprot.readListBegin() + for _i1143 in xrange(_size1139): + _elem1144 = iprot.readString() + self.success.append(_elem1144) iprot.readListEnd() else: iprot.skip(ftype) @@ -26554,8 +27438,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1138 in self.success: - oprot.writeString(iter1138) + for iter1145 in self.success: + oprot.writeString(iter1145) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26726,11 +27610,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1142, _size1139) = iprot.readListBegin() - for _i1143 in xrange(_size1139): - _elem1144 = Partition() - _elem1144.read(iprot) - self.success.append(_elem1144) + (_etype1149, _size1146) = iprot.readListBegin() + for _i1150 in xrange(_size1146): + _elem1151 = Partition() + _elem1151.read(iprot) + self.success.append(_elem1151) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -26759,8 +27643,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1145 in self.success: - iter1145.write(oprot) + for iter1152 in self.success: + iter1152.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26931,11 +27815,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1149, _size1146) = iprot.readListBegin() - for _i1150 in xrange(_size1146): - _elem1151 = PartitionSpec() - _elem1151.read(iprot) - self.success.append(_elem1151) + (_etype1156, _size1153) = iprot.readListBegin() + for _i1157 in xrange(_size1153): + _elem1158 = PartitionSpec() + _elem1158.read(iprot) + self.success.append(_elem1158) iprot.readListEnd() else: iprot.skip(ftype) @@ -26964,8 +27848,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1152 in self.success: - iter1152.write(oprot) + for iter1159 in self.success: + iter1159.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27385,10 +28269,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1156, _size1153) = iprot.readListBegin() - for _i1157 in xrange(_size1153): - _elem1158 = iprot.readString() - self.names.append(_elem1158) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = iprot.readString() + self.names.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -27413,8 +28297,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1159 in self.names: - oprot.writeString(iter1159) + for iter1166 in self.names: + oprot.writeString(iter1166) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27473,11 +28357,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1163, _size1160) = iprot.readListBegin() - for _i1164 in xrange(_size1160): - _elem1165 = Partition() - _elem1165.read(iprot) - self.success.append(_elem1165) + (_etype1170, _size1167) = iprot.readListBegin() + for _i1171 in xrange(_size1167): + _elem1172 = Partition() + _elem1172.read(iprot) + self.success.append(_elem1172) iprot.readListEnd() else: iprot.skip(ftype) @@ -27506,8 +28390,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1166 in self.success: - iter1166.write(oprot) + for iter1173 in self.success: + iter1173.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27757,11 +28641,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1170, _size1167) = iprot.readListBegin() - for _i1171 in xrange(_size1167): - _elem1172 = Partition() - _elem1172.read(iprot) - self.new_parts.append(_elem1172) + (_etype1177, _size1174) = iprot.readListBegin() + for _i1178 in xrange(_size1174): + _elem1179 = Partition() + _elem1179.read(iprot) + self.new_parts.append(_elem1179) iprot.readListEnd() else: iprot.skip(ftype) @@ -27786,8 +28670,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - 
for iter1173 in self.new_parts: - iter1173.write(oprot) + for iter1180 in self.new_parts: + iter1180.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27940,11 +28824,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1177, _size1174) = iprot.readListBegin() - for _i1178 in xrange(_size1174): - _elem1179 = Partition() - _elem1179.read(iprot) - self.new_parts.append(_elem1179) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = Partition() + _elem1186.read(iprot) + self.new_parts.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -27975,8 +28859,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1180 in self.new_parts: - iter1180.write(oprot) + for iter1187 in self.new_parts: + iter1187.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -28320,10 +29204,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1184, _size1181) = iprot.readListBegin() - for _i1185 in xrange(_size1181): - _elem1186 = iprot.readString() - self.part_vals.append(_elem1186) + (_etype1191, _size1188) = iprot.readListBegin() + for _i1192 in xrange(_size1188): + _elem1193 = iprot.readString() + self.part_vals.append(_elem1193) iprot.readListEnd() else: iprot.skip(ftype) @@ -28354,8 +29238,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1187 in self.part_vals: - oprot.writeString(iter1187) + for iter1194 in self.part_vals: + oprot.writeString(iter1194) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -28497,10 +29381,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1191, _size1188) = iprot.readListBegin() - for _i1192 in xrange(_size1188): - _elem1193 = iprot.readString() - self.part_vals.append(_elem1193) + (_etype1198, _size1195) = iprot.readListBegin() + for _i1199 in xrange(_size1195): + _elem1200 = iprot.readString() + self.part_vals.append(_elem1200) iprot.readListEnd() else: iprot.skip(ftype) @@ -28522,8 +29406,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1194 in self.part_vals: - oprot.writeString(iter1194) + for iter1201 in self.part_vals: + oprot.writeString(iter1201) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -28881,10 +29765,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1198, _size1195) = iprot.readListBegin() - for _i1199 in xrange(_size1195): - _elem1200 = iprot.readString() - self.success.append(_elem1200) + (_etype1205, _size1202) = iprot.readListBegin() + for _i1206 in xrange(_size1202): + _elem1207 = iprot.readString() + self.success.append(_elem1207) iprot.readListEnd() else: iprot.skip(ftype) @@ -28907,8 +29791,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1201 in self.success: - oprot.writeString(iter1201) + for iter1208 in self.success: + oprot.writeString(iter1208) oprot.writeListEnd() oprot.writeFieldEnd() if 
self.o1 is not None: @@ -29032,11 +29916,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1203, _vtype1204, _size1202 ) = iprot.readMapBegin() - for _i1206 in xrange(_size1202): - _key1207 = iprot.readString() - _val1208 = iprot.readString() - self.success[_key1207] = _val1208 + (_ktype1210, _vtype1211, _size1209 ) = iprot.readMapBegin() + for _i1213 in xrange(_size1209): + _key1214 = iprot.readString() + _val1215 = iprot.readString() + self.success[_key1214] = _val1215 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29059,9 +29943,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1209,viter1210 in self.success.items(): - oprot.writeString(kiter1209) - oprot.writeString(viter1210) + for kiter1216,viter1217 in self.success.items(): + oprot.writeString(kiter1216) + oprot.writeString(viter1217) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29137,11 +30021,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1212, _vtype1213, _size1211 ) = iprot.readMapBegin() - for _i1215 in xrange(_size1211): - _key1216 = iprot.readString() - _val1217 = iprot.readString() - self.part_vals[_key1216] = _val1217 + (_ktype1219, _vtype1220, _size1218 ) = iprot.readMapBegin() + for _i1222 in xrange(_size1218): + _key1223 = iprot.readString() + _val1224 = iprot.readString() + self.part_vals[_key1223] = _val1224 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29171,9 +30055,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1218,viter1219 in self.part_vals.items(): - oprot.writeString(kiter1218) - oprot.writeString(viter1219) + for kiter1225,viter1226 in self.part_vals.items(): + oprot.writeString(kiter1225) + oprot.writeString(viter1226) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -29387,11 +30271,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1221, _vtype1222, _size1220 ) = iprot.readMapBegin() - for _i1224 in xrange(_size1220): - _key1225 = iprot.readString() - _val1226 = iprot.readString() - self.part_vals[_key1225] = _val1226 + (_ktype1228, _vtype1229, _size1227 ) = iprot.readMapBegin() + for _i1231 in xrange(_size1227): + _key1232 = iprot.readString() + _val1233 = iprot.readString() + self.part_vals[_key1232] = _val1233 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29421,9 +30305,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1227,viter1228 in self.part_vals.items(): - oprot.writeString(kiter1227) - oprot.writeString(viter1228) + for kiter1234,viter1235 in self.part_vals.items(): + oprot.writeString(kiter1234) + oprot.writeString(viter1235) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -33075,10 +33959,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1232, _size1229) = iprot.readListBegin() - for _i1233 in xrange(_size1229): - _elem1234 = iprot.readString() - self.success.append(_elem1234) + (_etype1239, _size1236) = iprot.readListBegin() + for _i1240 in xrange(_size1236): + _elem1241 = iprot.readString() + self.success.append(_elem1241) 
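# [sketch] The hunks above and below only renumber the compiler-generated
# temporaries (_key1207 -> _key1214, _elem1241 -> _elem1248, and so on); the
# decode logic itself is unchanged. For reference, the generated pattern for
# one string->string map field, written as a self-contained helper that takes
# a py2 Thrift protocol object `iprot` (illustration only, not part of the patch):
def _read_string_map(iprot):
    result = {}
    (_ktype, _vtype, _size) = iprot.readMapBegin()  # key/value type tags, entry count
    for _i in xrange(_size):
        _key = iprot.readString()   # decode one key
        _val = iprot.readString()   # decode its value
        result[_key] = _val
    iprot.readMapEnd()
    return result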
iprot.readListEnd() else: iprot.skip(ftype) @@ -33101,8 +33985,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1235 in self.success: - oprot.writeString(iter1235) + for iter1242 in self.success: + oprot.writeString(iter1242) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33790,10 +34674,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1239, _size1236) = iprot.readListBegin() - for _i1240 in xrange(_size1236): - _elem1241 = iprot.readString() - self.success.append(_elem1241) + (_etype1246, _size1243) = iprot.readListBegin() + for _i1247 in xrange(_size1243): + _elem1248 = iprot.readString() + self.success.append(_elem1248) iprot.readListEnd() else: iprot.skip(ftype) @@ -33816,8 +34700,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1242 in self.success: - oprot.writeString(iter1242) + for iter1249 in self.success: + oprot.writeString(iter1249) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34331,11 +35215,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1246, _size1243) = iprot.readListBegin() - for _i1247 in xrange(_size1243): - _elem1248 = Role() - _elem1248.read(iprot) - self.success.append(_elem1248) + (_etype1253, _size1250) = iprot.readListBegin() + for _i1254 in xrange(_size1250): + _elem1255 = Role() + _elem1255.read(iprot) + self.success.append(_elem1255) iprot.readListEnd() else: iprot.skip(ftype) @@ -34358,8 +35242,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1249 in self.success: - iter1249.write(oprot) + for iter1256 in self.success: + iter1256.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34868,10 +35752,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1253, _size1250) = iprot.readListBegin() - for _i1254 in xrange(_size1250): - _elem1255 = iprot.readString() - self.group_names.append(_elem1255) + (_etype1260, _size1257) = iprot.readListBegin() + for _i1261 in xrange(_size1257): + _elem1262 = iprot.readString() + self.group_names.append(_elem1262) iprot.readListEnd() else: iprot.skip(ftype) @@ -34896,8 +35780,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1256 in self.group_names: - oprot.writeString(iter1256) + for iter1263 in self.group_names: + oprot.writeString(iter1263) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35124,11 +36008,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1260, _size1257) = iprot.readListBegin() - for _i1261 in xrange(_size1257): - _elem1262 = HiveObjectPrivilege() - _elem1262.read(iprot) - self.success.append(_elem1262) + (_etype1267, _size1264) = iprot.readListBegin() + for _i1268 in xrange(_size1264): + _elem1269 = HiveObjectPrivilege() + _elem1269.read(iprot) + self.success.append(_elem1269) iprot.readListEnd() else: iprot.skip(ftype) @@ -35151,8 +36035,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1263 in self.success: - iter1263.write(oprot) + for iter1270 in self.success: + iter1270.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35650,10 +36534,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1267, _size1264) = iprot.readListBegin() - for _i1268 in xrange(_size1264): - _elem1269 = iprot.readString() - self.group_names.append(_elem1269) + (_etype1274, _size1271) = iprot.readListBegin() + for _i1275 in xrange(_size1271): + _elem1276 = iprot.readString() + self.group_names.append(_elem1276) iprot.readListEnd() else: iprot.skip(ftype) @@ -35674,8 +36558,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1270 in self.group_names: - oprot.writeString(iter1270) + for iter1277 in self.group_names: + oprot.writeString(iter1277) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35730,10 +36614,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1274, _size1271) = iprot.readListBegin() - for _i1275 in xrange(_size1271): - _elem1276 = iprot.readString() - self.success.append(_elem1276) + (_etype1281, _size1278) = iprot.readListBegin() + for _i1282 in xrange(_size1278): + _elem1283 = iprot.readString() + self.success.append(_elem1283) iprot.readListEnd() else: iprot.skip(ftype) @@ -35756,8 +36640,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1277 in self.success: - oprot.writeString(iter1277) + for iter1284 in self.success: + oprot.writeString(iter1284) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36689,10 +37573,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1281, _size1278) = iprot.readListBegin() - for _i1282 in xrange(_size1278): - _elem1283 = iprot.readString() - self.success.append(_elem1283) + (_etype1288, _size1285) = iprot.readListBegin() + for _i1289 in xrange(_size1285): + _elem1290 = iprot.readString() + self.success.append(_elem1290) iprot.readListEnd() else: iprot.skip(ftype) @@ -36709,8 +37593,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1284 in self.success: - oprot.writeString(iter1284) + for iter1291 in self.success: + oprot.writeString(iter1291) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -37237,10 +38121,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1288, _size1285) = iprot.readListBegin() - for _i1289 in xrange(_size1285): - _elem1290 = iprot.readString() - self.success.append(_elem1290) + (_etype1295, _size1292) = iprot.readListBegin() + for _i1296 in xrange(_size1292): + _elem1297 = iprot.readString() + self.success.append(_elem1297) iprot.readListEnd() else: iprot.skip(ftype) @@ -37257,8 +38141,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1291 in self.success: - oprot.writeString(iter1291) + for iter1298 in self.success: + oprot.writeString(iter1298) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ 
-45426,11 +46310,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1295, _size1292) = iprot.readListBegin() - for _i1296 in xrange(_size1292): - _elem1297 = SchemaVersion() - _elem1297.read(iprot) - self.success.append(_elem1297) + (_etype1302, _size1299) = iprot.readListBegin() + for _i1303 in xrange(_size1299): + _elem1304 = SchemaVersion() + _elem1304.read(iprot) + self.success.append(_elem1304) iprot.readListEnd() else: iprot.skip(ftype) @@ -45459,8 +46343,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1298 in self.success: - iter1298.write(oprot) + for iter1305 in self.success: + iter1305.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 82539edb95..0b9034aa3a 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -570,6 +570,7 @@ class SQLPrimaryKey: - enable_cstr - validate_cstr - rely_cstr + - catName """ thrift_spec = ( @@ -582,9 +583,10 @@ class SQLPrimaryKey: (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,): self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -593,6 +595,7 @@ def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=Non self.enable_cstr = enable_cstr self.validate_cstr = validate_cstr self.rely_cstr = rely_cstr + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -643,6 +646,11 @@ def read(self, iprot): self.rely_cstr = iprot.readBool() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -685,6 +693,10 @@ def write(self, oprot): oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -702,6 +714,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.enable_cstr) value = (value * 31) ^ hash(self.validate_cstr) value = (value * 31) ^ hash(self.rely_cstr) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -732,6 +745,7 @@ class SQLForeignKey: - enable_cstr - validate_cstr - rely_cstr + - catName """ thrift_spec = ( @@ -750,9 +764,10 @@ class SQLForeignKey: (12, TType.BOOL, 'enable_cstr', None, None, ), # 12 (13, TType.BOOL, 'validate_cstr', None, None, ), # 13 (14, TType.BOOL, 'rely_cstr', None, None, ), # 14 + (15, TType.STRING, 
'catName', None, None, ), # 15 ) - def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktable_db=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktable_db=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,): self.pktable_db = pktable_db self.pktable_name = pktable_name self.pkcolumn_name = pkcolumn_name @@ -767,6 +782,7 @@ def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktab self.enable_cstr = enable_cstr self.validate_cstr = validate_cstr self.rely_cstr = rely_cstr + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -847,6 +863,11 @@ def read(self, iprot): self.rely_cstr = iprot.readBool() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -913,6 +934,10 @@ def write(self, oprot): oprot.writeFieldBegin('rely_cstr', TType.BOOL, 14) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 15) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -936,6 +961,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.enable_cstr) value = (value * 31) ^ hash(self.validate_cstr) value = (value * 31) ^ hash(self.rely_cstr) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -952,6 +978,7 @@ def __ne__(self, other): class SQLUniqueConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -964,17 +991,19 @@ class SQLUniqueConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.I32, 'key_seq', None, None, ), # 4 - (5, TType.STRING, 'uk_name', None, None, ), # 5 - (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 - (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 - (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.I32, 'key_seq', None, None, ), # 5 + (6, TType.STRING, 'uk_name', None, None, ), # 6 + (7, TType.BOOL, 'enable_cstr', None, None, ), # 7 + (8, TType.BOOL, 'validate_cstr', None, None, ), # 8 + (9, TType.BOOL, 'rely_cstr', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, uk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, key_seq=None, uk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -995,40 +1024,45 @@ def read(self, iprot): break if fid == 1: if ftype == 
TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: + if ftype == TType.STRING: + self.column_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: if ftype == TType.I32: self.key_seq = iprot.readI32() else: iprot.skip(ftype) - elif fid == 5: + elif fid == 6: if ftype == TType.STRING: self.uk_name = iprot.readString() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1043,36 +1077,40 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLUniqueConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.key_seq is not None: - oprot.writeFieldBegin('key_seq', TType.I32, 4) + oprot.writeFieldBegin('key_seq', TType.I32, 5) oprot.writeI32(self.key_seq) oprot.writeFieldEnd() if self.uk_name is not None: - oprot.writeFieldBegin('uk_name', TType.STRING, 5) + oprot.writeFieldBegin('uk_name', TType.STRING, 6) oprot.writeString(self.uk_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1084,6 +1122,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1108,6 +1147,7 @@ def __ne__(self, other): class SQLNotNullConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -1119,16 +1159,18 @@ class SQLNotNullConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 
'column_name', None, None, ), # 3 - (4, TType.STRING, 'nn_name', None, None, ), # 4 - (5, TType.BOOL, 'enable_cstr', None, None, ), # 5 - (6, TType.BOOL, 'validate_cstr', None, None, ), # 6 - (7, TType.BOOL, 'rely_cstr', None, None, ), # 7 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.STRING, 'nn_name', None, None, ), # 5 + (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 + (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 + (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 ) - def __init__(self, table_db=None, table_name=None, column_name=None, nn_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, nn_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -1148,35 +1190,40 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.nn_name = iprot.readString() + self.column_name = iprot.readString() else: iprot.skip(ftype) elif fid == 5: + if ftype == TType.STRING: + self.nn_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1191,32 +1238,36 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLNotNullConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.nn_name is not None: - oprot.writeFieldBegin('nn_name', TType.STRING, 4) + oprot.writeFieldBegin('nn_name', TType.STRING, 5) oprot.writeString(self.nn_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 5) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 
7) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1228,6 +1279,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1251,6 +1303,7 @@ def __ne__(self, other): class SQLDefaultConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -1263,17 +1316,19 @@ class SQLDefaultConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.STRING, 'default_value', None, None, ), # 4 - (5, TType.STRING, 'dc_name', None, None, ), # 5 - (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 - (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 - (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.STRING, 'default_value', None, None, ), # 5 + (6, TType.STRING, 'dc_name', None, None, ), # 6 + (7, TType.BOOL, 'enable_cstr', None, None, ), # 7 + (8, TType.BOOL, 'validate_cstr', None, None, ), # 8 + (9, TType.BOOL, 'rely_cstr', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -1294,40 +1349,45 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.default_value = iprot.readString() + self.column_name = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: - self.dc_name = iprot.readString() + self.default_value = iprot.readString() else: iprot.skip(ftype) elif fid == 6: + if ftype == TType.STRING: + self.dc_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1342,36 +1402,40 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLDefaultConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + 
oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.default_value is not None: - oprot.writeFieldBegin('default_value', TType.STRING, 4) + oprot.writeFieldBegin('default_value', TType.STRING, 5) oprot.writeString(self.default_value) oprot.writeFieldEnd() if self.dc_name is not None: - oprot.writeFieldBegin('dc_name', TType.STRING, 5) + oprot.writeFieldBegin('dc_name', TType.STRING, 6) oprot.writeString(self.dc_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1383,6 +1447,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1407,6 +1472,7 @@ def __ne__(self, other): class SQLCheckConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -1419,17 +1485,19 @@ class SQLCheckConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.STRING, 'check_expression', None, None, ), # 4 - (5, TType.STRING, 'dc_name', None, None, ), # 5 - (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 - (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 - (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.STRING, 'check_expression', None, None, ), # 5 + (6, TType.STRING, 'dc_name', None, None, ), # 6 + (7, TType.BOOL, 'enable_cstr', None, None, ), # 7 + (8, TType.BOOL, 'validate_cstr', None, None, ), # 8 + (9, TType.BOOL, 'rely_cstr', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, check_expression=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, check_expression=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -1450,40 +1518,45 @@ def read(self, iprot): break if fid == 1: if ftype == 
TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.check_expression = iprot.readString() + self.column_name = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: - self.dc_name = iprot.readString() + self.check_expression = iprot.readString() else: iprot.skip(ftype) elif fid == 6: + if ftype == TType.STRING: + self.dc_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1498,36 +1571,40 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLCheckConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.check_expression is not None: - oprot.writeFieldBegin('check_expression', TType.STRING, 4) + oprot.writeFieldBegin('check_expression', TType.STRING, 5) oprot.writeString(self.check_expression) oprot.writeFieldEnd() if self.dc_name is not None: - oprot.writeFieldBegin('dc_name', TType.STRING, 5) + oprot.writeFieldBegin('dc_name', TType.STRING, 6) oprot.writeString(self.dc_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1539,6 +1616,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1681,6 +1759,7 @@ class HiveObjectRef: - objectName - partValues - columnName + - catName """ thrift_spec = ( @@ -1690,14 +1769,16 @@ class HiveObjectRef: (3, TType.STRING, 'objectName', None, None, ), # 3 (4, TType.LIST, 'partValues', 
(TType.STRING,None), None, ), # 4 (5, TType.STRING, 'columnName', None, None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None,): + def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None, catName=None,): self.objectType = objectType self.dbName = dbName self.objectName = objectName self.partValues = partValues self.columnName = columnName + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1738,6 +1819,11 @@ def read(self, iprot): self.columnName = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1771,6 +1857,10 @@ def write(self, oprot): oprot.writeFieldBegin('columnName', TType.STRING, 5) oprot.writeString(self.columnName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1785,6 +1875,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.objectName) value = (value * 31) ^ hash(self.partValues) value = (value * 31) ^ hash(self.columnName) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -3143,16 +3234,12 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class Database: +class Catalog: """ Attributes: - name - description - locationUri - - parameters - - privileges - - ownerName - - ownerType """ thrift_spec = ( @@ -3160,20 +3247,12 @@ class Database: (1, TType.STRING, 'name', None, None, ), # 1 (2, TType.STRING, 'description', None, None, ), # 2 (3, TType.STRING, 'locationUri', None, None, ), # 3 - (4, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 4 - (5, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 5 - (6, TType.STRING, 'ownerName', None, None, ), # 6 - (7, TType.I32, 'ownerType', None, None, ), # 7 ) - def __init__(self, name=None, description=None, locationUri=None, parameters=None, privileges=None, ownerName=None, ownerType=None,): + def __init__(self, name=None, description=None, locationUri=None,): self.name = name self.description = description self.locationUri = locationUri - self.parameters = parameters - self.privileges = privileges - self.ownerName = ownerName - self.ownerType = ownerType def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3199,33 +3278,6 @@ def read(self, iprot): self.locationUri = iprot.readString() else: iprot.skip(ftype) - elif fid == 4: - if ftype == TType.MAP: - self.parameters = {} - (_ktype84, _vtype85, _size83 ) = iprot.readMapBegin() - for _i87 in xrange(_size83): - _key88 = iprot.readString() - _val89 = iprot.readString() - self.parameters[_key88] = _val89 - iprot.readMapEnd() - else: - iprot.skip(ftype) - elif fid == 5: - if ftype == TType.STRUCT: - self.privileges = PrincipalPrivilegeSet() - self.privileges.read(iprot) - else: - iprot.skip(ftype) - elif fid == 6: - if ftype == TType.STRING: - 
self.ownerName = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 7: - if ftype == TType.I32: - self.ownerType = iprot.readI32() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -3235,7 +3287,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('Database') + oprot.writeStructBegin('Catalog') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) @@ -3248,26 +3300,6 @@ def write(self, oprot): oprot.writeFieldBegin('locationUri', TType.STRING, 3) oprot.writeString(self.locationUri) oprot.writeFieldEnd() - if self.parameters is not None: - oprot.writeFieldBegin('parameters', TType.MAP, 4) - oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter90,viter91 in self.parameters.items(): - oprot.writeString(kiter90) - oprot.writeString(viter91) - oprot.writeMapEnd() - oprot.writeFieldEnd() - if self.privileges is not None: - oprot.writeFieldBegin('privileges', TType.STRUCT, 5) - self.privileges.write(oprot) - oprot.writeFieldEnd() - if self.ownerName is not None: - oprot.writeFieldBegin('ownerName', TType.STRING, 6) - oprot.writeString(self.ownerName) - oprot.writeFieldEnd() - if self.ownerType is not None: - oprot.writeFieldBegin('ownerType', TType.I32, 7) - oprot.writeI32(self.ownerType) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3280,10 +3312,6 @@ def __hash__(self): value = (value * 31) ^ hash(self.name) value = (value * 31) ^ hash(self.description) value = (value * 31) ^ hash(self.locationUri) - value = (value * 31) ^ hash(self.parameters) - value = (value * 31) ^ hash(self.privileges) - value = (value * 31) ^ hash(self.ownerName) - value = (value * 31) ^ hash(self.ownerType) return value def __repr__(self): @@ -3297,37 +3325,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class SerDeInfo: +class CreateCatalogRequest: """ Attributes: - - name - - serializationLib - - parameters - - description - - serializerClass - - deserializerClass - - serdeType + - catalog """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.STRING, 'serializationLib', None, None, ), # 2 - (3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3 - (4, TType.STRING, 'description', None, None, ), # 4 - (5, TType.STRING, 'serializerClass', None, None, ), # 5 - (6, TType.STRING, 'deserializerClass', None, None, ), # 6 - (7, TType.I32, 'serdeType', None, None, ), # 7 + (1, TType.STRUCT, 'catalog', (Catalog, Catalog.thrift_spec), None, ), # 1 ) - def __init__(self, name=None, serializationLib=None, parameters=None, description=None, serializerClass=None, deserializerClass=None, serdeType=None,): - self.name = name - self.serializationLib = serializationLib - self.parameters = parameters - self.description = description - self.serializerClass = serializerClass - self.deserializerClass = deserializerClass - self.serdeType = serdeType + def __init__(self, catalog=None,): + self.catalog = catalog def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3339,44 +3349,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid 
== 1: - if ftype == TType.STRING: - self.name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.serializationLib = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.MAP: - self.parameters = {} - (_ktype93, _vtype94, _size92 ) = iprot.readMapBegin() - for _i96 in xrange(_size92): - _key97 = iprot.readString() - _val98 = iprot.readString() - self.parameters[_key97] = _val98 - iprot.readMapEnd() - else: - iprot.skip(ftype) - elif fid == 4: - if ftype == TType.STRING: - self.description = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 5: - if ftype == TType.STRING: - self.serializerClass = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 6: - if ftype == TType.STRING: - self.deserializerClass = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 7: - if ftype == TType.I32: - self.serdeType = iprot.readI32() + if ftype == TType.STRUCT: + self.catalog = Catalog() + self.catalog.read(iprot) else: iprot.skip(ftype) else: @@ -3388,8 +3363,563 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('SerDeInfo') - if self.name is not None: + oprot.writeStructBegin('CreateCatalogRequest') + if self.catalog is not None: + oprot.writeFieldBegin('catalog', TType.STRUCT, 1) + self.catalog.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catalog) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetCatalogRequest: + """ + Attributes: + - name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + ) + + def __init__(self, name=None,): + self.name = name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetCatalogRequest') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, 
value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetCatalogResponse: + """ + Attributes: + - catalog + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'catalog', (Catalog, Catalog.thrift_spec), None, ), # 1 + ) + + def __init__(self, catalog=None,): + self.catalog = catalog + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catalog = Catalog() + self.catalog.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetCatalogResponse') + if self.catalog is not None: + oprot.writeFieldBegin('catalog', TType.STRUCT, 1) + self.catalog.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catalog) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetCatalogsResponse: + """ + Attributes: + - names + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'names', (TType.STRING,None), None, ), # 1 + ) + + def __init__(self, names=None,): + self.names = names + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.names = [] + (_etype86, _size83) = iprot.readListBegin() + for _i87 in xrange(_size83): + _elem88 = iprot.readString() + self.names.append(_elem88) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetCatalogsResponse') + if self.names is not None: + oprot.writeFieldBegin('names', TType.LIST, 1) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter89 in self.names: + oprot.writeString(iter89) + oprot.writeListEnd() + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.names) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class DropCatalogRequest: + """ + Attributes: + - name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + ) + + def __init__(self, name=None,): + self.name = name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('DropCatalogRequest') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class Database: + """ + Attributes: + - name + - description + - locationUri + - parameters + - privileges + - ownerName + - ownerType + - catalogName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'description', None, None, ), # 2 + (3, TType.STRING, 'locationUri', None, None, ), # 3 + (4, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 4 + (5, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 5 + (6, TType.STRING, 'ownerName', None, None, ), # 6 + (7, TType.I32, 'ownerType', None, None, ), # 7 + (8, TType.STRING, 'catalogName', None, None, ), # 8 + ) + + def __init__(self, name=None, description=None, locationUri=None, parameters=None, privileges=None, ownerName=None, ownerType=None, catalogName=None,): + self.name = name + self.description = description + self.locationUri = locationUri + self.parameters = parameters + self.privileges = privileges + self.ownerName = ownerName + self.ownerType = ownerType + self.catalogName = catalogName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and 
fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.description = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.MAP: + self.parameters = {} + (_ktype91, _vtype92, _size90 ) = iprot.readMapBegin() + for _i94 in xrange(_size90): + _key95 = iprot.readString() + _val96 = iprot.readString() + self.parameters[_key95] = _val96 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRUCT: + self.privileges = PrincipalPrivilegeSet() + self.privileges.read(iprot) + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.ownerName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.ownerType = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catalogName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('Database') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin('description', TType.STRING, 2) + oprot.writeString(self.description) + oprot.writeFieldEnd() + if self.locationUri is not None: + oprot.writeFieldBegin('locationUri', TType.STRING, 3) + oprot.writeString(self.locationUri) + oprot.writeFieldEnd() + if self.parameters is not None: + oprot.writeFieldBegin('parameters', TType.MAP, 4) + oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) + for kiter97,viter98 in self.parameters.items(): + oprot.writeString(kiter97) + oprot.writeString(viter98) + oprot.writeMapEnd() + oprot.writeFieldEnd() + if self.privileges is not None: + oprot.writeFieldBegin('privileges', TType.STRUCT, 5) + self.privileges.write(oprot) + oprot.writeFieldEnd() + if self.ownerName is not None: + oprot.writeFieldBegin('ownerName', TType.STRING, 6) + oprot.writeString(self.ownerName) + oprot.writeFieldEnd() + if self.ownerType is not None: + oprot.writeFieldBegin('ownerType', TType.I32, 7) + oprot.writeI32(self.ownerType) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin('catalogName', TType.STRING, 8) + oprot.writeString(self.catalogName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.description) + value = (value * 31) ^ hash(self.locationUri) + value = (value * 31) ^ hash(self.parameters) + value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.ownerName) + value = (value * 31) ^ hash(self.ownerType) + value = (value * 31) ^ hash(self.catalogName) + return value + + def 
__repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class SerDeInfo: + """ + Attributes: + - name + - serializationLib + - parameters + - description + - serializerClass + - deserializerClass + - serdeType + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'serializationLib', None, None, ), # 2 + (3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'description', None, None, ), # 4 + (5, TType.STRING, 'serializerClass', None, None, ), # 5 + (6, TType.STRING, 'deserializerClass', None, None, ), # 6 + (7, TType.I32, 'serdeType', None, None, ), # 7 + ) + + def __init__(self, name=None, serializationLib=None, parameters=None, description=None, serializerClass=None, deserializerClass=None, serdeType=None,): + self.name = name + self.serializationLib = serializationLib + self.parameters = parameters + self.description = description + self.serializerClass = serializerClass + self.deserializerClass = deserializerClass + self.serdeType = serdeType + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.serializationLib = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.MAP: + self.parameters = {} + (_ktype100, _vtype101, _size99 ) = iprot.readMapBegin() + for _i103 in xrange(_size99): + _key104 = iprot.readString() + _val105 = iprot.readString() + self.parameters[_key104] = _val105 + iprot.readMapEnd() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.description = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.serializerClass = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.deserializerClass = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.serdeType = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SerDeInfo') + if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) oprot.writeFieldEnd() @@ -3400,9 +3930,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter99,viter100 in self.parameters.items(): - oprot.writeString(kiter99) - oprot.writeString(viter100) + for kiter106,viter107 
in self.parameters.items(): + oprot.writeString(kiter106) + oprot.writeString(viter107) oprot.writeMapEnd() oprot.writeFieldEnd() if self.description is not None: @@ -3560,41 +4090,41 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.skewedColNames = [] - (_etype104, _size101) = iprot.readListBegin() - for _i105 in xrange(_size101): - _elem106 = iprot.readString() - self.skewedColNames.append(_elem106) + (_etype111, _size108) = iprot.readListBegin() + for _i112 in xrange(_size108): + _elem113 = iprot.readString() + self.skewedColNames.append(_elem113) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.skewedColValues = [] - (_etype110, _size107) = iprot.readListBegin() - for _i111 in xrange(_size107): - _elem112 = [] - (_etype116, _size113) = iprot.readListBegin() - for _i117 in xrange(_size113): - _elem118 = iprot.readString() - _elem112.append(_elem118) + (_etype117, _size114) = iprot.readListBegin() + for _i118 in xrange(_size114): + _elem119 = [] + (_etype123, _size120) = iprot.readListBegin() + for _i124 in xrange(_size120): + _elem125 = iprot.readString() + _elem119.append(_elem125) iprot.readListEnd() - self.skewedColValues.append(_elem112) + self.skewedColValues.append(_elem119) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.skewedColValueLocationMaps = {} - (_ktype120, _vtype121, _size119 ) = iprot.readMapBegin() - for _i123 in xrange(_size119): - _key124 = [] - (_etype129, _size126) = iprot.readListBegin() - for _i130 in xrange(_size126): - _elem131 = iprot.readString() - _key124.append(_elem131) + (_ktype127, _vtype128, _size126 ) = iprot.readMapBegin() + for _i130 in xrange(_size126): + _key131 = [] + (_etype136, _size133) = iprot.readListBegin() + for _i137 in xrange(_size133): + _elem138 = iprot.readString() + _key131.append(_elem138) iprot.readListEnd() - _val125 = iprot.readString() - self.skewedColValueLocationMaps[_key124] = _val125 + _val132 = iprot.readString() + self.skewedColValueLocationMaps[_key131] = _val132 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3611,29 +4141,29 @@ def write(self, oprot): if self.skewedColNames is not None: oprot.writeFieldBegin('skewedColNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.skewedColNames)) - for iter132 in self.skewedColNames: - oprot.writeString(iter132) + for iter139 in self.skewedColNames: + oprot.writeString(iter139) oprot.writeListEnd() oprot.writeFieldEnd() if self.skewedColValues is not None: oprot.writeFieldBegin('skewedColValues', TType.LIST, 2) oprot.writeListBegin(TType.LIST, len(self.skewedColValues)) - for iter133 in self.skewedColValues: - oprot.writeListBegin(TType.STRING, len(iter133)) - for iter134 in iter133: - oprot.writeString(iter134) + for iter140 in self.skewedColValues: + oprot.writeListBegin(TType.STRING, len(iter140)) + for iter141 in iter140: + oprot.writeString(iter141) oprot.writeListEnd() oprot.writeListEnd() oprot.writeFieldEnd() if self.skewedColValueLocationMaps is not None: oprot.writeFieldBegin('skewedColValueLocationMaps', TType.MAP, 3) oprot.writeMapBegin(TType.LIST, TType.STRING, len(self.skewedColValueLocationMaps)) - for kiter135,viter136 in self.skewedColValueLocationMaps.items(): - oprot.writeListBegin(TType.STRING, len(kiter135)) - for iter137 in kiter135: - oprot.writeString(iter137) + for kiter142,viter143 in self.skewedColValueLocationMaps.items(): + oprot.writeListBegin(TType.STRING, len(kiter142)) + for iter144 in kiter142: + oprot.writeString(iter144) 
oprot.writeListEnd() - oprot.writeString(viter136) + oprot.writeString(viter143) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -3720,11 +4250,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.cols = [] - (_etype141, _size138) = iprot.readListBegin() - for _i142 in xrange(_size138): - _elem143 = FieldSchema() - _elem143.read(iprot) - self.cols.append(_elem143) + (_etype148, _size145) = iprot.readListBegin() + for _i149 in xrange(_size145): + _elem150 = FieldSchema() + _elem150.read(iprot) + self.cols.append(_elem150) iprot.readListEnd() else: iprot.skip(ftype) @@ -3762,32 +4292,32 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.bucketCols = [] - (_etype147, _size144) = iprot.readListBegin() - for _i148 in xrange(_size144): - _elem149 = iprot.readString() - self.bucketCols.append(_elem149) + (_etype154, _size151) = iprot.readListBegin() + for _i155 in xrange(_size151): + _elem156 = iprot.readString() + self.bucketCols.append(_elem156) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.LIST: self.sortCols = [] - (_etype153, _size150) = iprot.readListBegin() - for _i154 in xrange(_size150): - _elem155 = Order() - _elem155.read(iprot) - self.sortCols.append(_elem155) + (_etype160, _size157) = iprot.readListBegin() + for _i161 in xrange(_size157): + _elem162 = Order() + _elem162.read(iprot) + self.sortCols.append(_elem162) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 10: if ftype == TType.MAP: self.parameters = {} - (_ktype157, _vtype158, _size156 ) = iprot.readMapBegin() - for _i160 in xrange(_size156): - _key161 = iprot.readString() - _val162 = iprot.readString() - self.parameters[_key161] = _val162 + (_ktype164, _vtype165, _size163 ) = iprot.readMapBegin() + for _i167 in xrange(_size163): + _key168 = iprot.readString() + _val169 = iprot.readString() + self.parameters[_key168] = _val169 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3815,8 +4345,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter163 in self.cols: - iter163.write(oprot) + for iter170 in self.cols: + iter170.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.location is not None: @@ -3846,23 +4376,23 @@ def write(self, oprot): if self.bucketCols is not None: oprot.writeFieldBegin('bucketCols', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.bucketCols)) - for iter164 in self.bucketCols: - oprot.writeString(iter164) + for iter171 in self.bucketCols: + oprot.writeString(iter171) oprot.writeListEnd() oprot.writeFieldEnd() if self.sortCols is not None: oprot.writeFieldBegin('sortCols', TType.LIST, 9) oprot.writeListBegin(TType.STRUCT, len(self.sortCols)) - for iter165 in self.sortCols: - iter165.write(oprot) + for iter172 in self.sortCols: + iter172.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter166,viter167 in self.parameters.items(): - oprot.writeString(kiter166) - oprot.writeString(viter167) + for kiter173,viter174 in self.parameters.items(): + oprot.writeString(kiter173) + oprot.writeString(viter174) oprot.writeMapEnd() oprot.writeFieldEnd() if self.skewedInfo is not None: @@ -3926,6 +4456,7 @@ class Table: - temporary - rewriteEnabled - creationMetadata + - catName """ thrift_spec = ( @@ -3946,9 +4477,10 @@ class Table: (14, 
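Table picks up catName as optional STRING field 17, appended after creationMetadata. Because the id is new and write() only emits it when set, an old reader that hits field 17 falls into the final iprot.skip(ftype) branch and keeps working, while old payloads simply leave catName as None; in these Python bindings the presence check is just `tbl.catName is not None`. A fallback sketch; the default catalog name 'hive' is an assumption here:

    def table_catalog(tbl, default='hive'):
        # optional field: None means the writer predates catalogs
        return tbl.catName if tbl.catName is not None else default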
TType.BOOL, 'temporary', None, False, ), # 14 (15, TType.BOOL, 'rewriteEnabled', None, None, ), # 15 (16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16 + (17, TType.STRING, 'catName', None, None, ), # 17 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None,): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3965,6 +4497,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.temporary = temporary self.rewriteEnabled = rewriteEnabled self.creationMetadata = creationMetadata + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4014,22 +4547,22 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.partitionKeys = [] - (_etype171, _size168) = iprot.readListBegin() - for _i172 in xrange(_size168): - _elem173 = FieldSchema() - _elem173.read(iprot) - self.partitionKeys.append(_elem173) + (_etype178, _size175) = iprot.readListBegin() + for _i179 in xrange(_size175): + _elem180 = FieldSchema() + _elem180.read(iprot) + self.partitionKeys.append(_elem180) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.MAP: self.parameters = {} - (_ktype175, _vtype176, _size174 ) = iprot.readMapBegin() - for _i178 in xrange(_size174): - _key179 = iprot.readString() - _val180 = iprot.readString() - self.parameters[_key179] = _val180 + (_ktype182, _vtype183, _size181 ) = iprot.readMapBegin() + for _i185 in xrange(_size181): + _key186 = iprot.readString() + _val187 = iprot.readString() + self.parameters[_key186] = _val187 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4070,6 +4603,11 @@ def read(self, iprot): self.creationMetadata.read(iprot) else: iprot.skip(ftype) + elif fid == 17: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4111,16 +4649,16 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter181 in self.partitionKeys: - iter181.write(oprot) + for iter188 in self.partitionKeys: + iter188.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter182,viter183 in self.parameters.items(): - oprot.writeString(kiter182) - oprot.writeString(viter183) + for kiter189,viter190 in self.parameters.items(): + oprot.writeString(kiter189) + oprot.writeString(viter190) oprot.writeMapEnd() oprot.writeFieldEnd() if self.viewOriginalText is not None: @@ -4151,6 +4689,10 @@ def write(self, oprot): oprot.writeFieldBegin('creationMetadata', 
TType.STRUCT, 16) self.creationMetadata.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 17) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4176,6 +4718,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.temporary) value = (value * 31) ^ hash(self.rewriteEnabled) value = (value * 31) ^ hash(self.creationMetadata) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -4200,6 +4743,7 @@ class Partition: - sd - parameters - privileges + - catName """ thrift_spec = ( @@ -4212,9 +4756,10 @@ class Partition: (6, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 6 (7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7 (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None,): + def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,): self.values = values self.dbName = dbName self.tableName = tableName @@ -4223,6 +4768,7 @@ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, la self.sd = sd self.parameters = parameters self.privileges = privileges + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4236,10 +4782,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype187, _size184) = iprot.readListBegin() - for _i188 in xrange(_size184): - _elem189 = iprot.readString() - self.values.append(_elem189) + (_etype194, _size191) = iprot.readListBegin() + for _i195 in xrange(_size191): + _elem196 = iprot.readString() + self.values.append(_elem196) iprot.readListEnd() else: iprot.skip(ftype) @@ -4272,11 +4818,11 @@ def read(self, iprot): elif fid == 7: if ftype == TType.MAP: self.parameters = {} - (_ktype191, _vtype192, _size190 ) = iprot.readMapBegin() - for _i194 in xrange(_size190): - _key195 = iprot.readString() - _val196 = iprot.readString() - self.parameters[_key195] = _val196 + (_ktype198, _vtype199, _size197 ) = iprot.readMapBegin() + for _i201 in xrange(_size197): + _key202 = iprot.readString() + _val203 = iprot.readString() + self.parameters[_key202] = _val203 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4286,6 +4832,11 @@ def read(self, iprot): self.privileges.read(iprot) else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4299,8 +4850,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter197 in self.values: - oprot.writeString(iter197) + for iter204 in self.values: + oprot.writeString(iter204) oprot.writeListEnd() oprot.writeFieldEnd() if self.dbName is not None: @@ -4326,15 +4877,19 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 7) oprot.writeMapBegin(TType.STRING, TType.STRING, 
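Partition follows the same append-at-the-end pattern: catName becomes optional STRING field 9 after privileges, read with a `fid == 9` branch and skipped by older bindings. Note that __hash__ now folds catName in, so two Partition objects that differ only in catalog no longer hash or compare equal. A small sketch of stamping a catalog onto partitions before shipping them; `parts` and `cat` are assumed inputs:

    def stamp_catalog(parts, cat):
        # keep partitions consistent with the catalog of their parent table
        for p in parts:
            if p.catName is None:
                p.catName = cat
        return parts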
len(self.parameters)) - for kiter198,viter199 in self.parameters.items(): - oprot.writeString(kiter198) - oprot.writeString(viter199) + for kiter205,viter206 in self.parameters.items(): + oprot.writeString(kiter205) + oprot.writeString(viter206) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: oprot.writeFieldBegin('privileges', TType.STRUCT, 8) self.privileges.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4352,6 +4907,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.sd) value = (value * 31) ^ hash(self.parameters) value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -4406,10 +4962,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype203, _size200) = iprot.readListBegin() - for _i204 in xrange(_size200): - _elem205 = iprot.readString() - self.values.append(_elem205) + (_etype210, _size207) = iprot.readListBegin() + for _i211 in xrange(_size207): + _elem212 = iprot.readString() + self.values.append(_elem212) iprot.readListEnd() else: iprot.skip(ftype) @@ -4431,11 +4987,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.MAP: self.parameters = {} - (_ktype207, _vtype208, _size206 ) = iprot.readMapBegin() - for _i210 in xrange(_size206): - _key211 = iprot.readString() - _val212 = iprot.readString() - self.parameters[_key211] = _val212 + (_ktype214, _vtype215, _size213 ) = iprot.readMapBegin() + for _i217 in xrange(_size213): + _key218 = iprot.readString() + _val219 = iprot.readString() + self.parameters[_key218] = _val219 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4458,8 +5014,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter213 in self.values: - oprot.writeString(iter213) + for iter220 in self.values: + oprot.writeString(iter220) oprot.writeListEnd() oprot.writeFieldEnd() if self.createTime is not None: @@ -4477,9 +5033,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 5) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter214,viter215 in self.parameters.items(): - oprot.writeString(kiter214) - oprot.writeString(viter215) + for kiter221,viter222 in self.parameters.items(): + oprot.writeString(kiter221) + oprot.writeString(viter222) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -4543,11 +5099,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype219, _size216) = iprot.readListBegin() - for _i220 in xrange(_size216): - _elem221 = PartitionWithoutSD() - _elem221.read(iprot) - self.partitions.append(_elem221) + (_etype226, _size223) = iprot.readListBegin() + for _i227 in xrange(_size223): + _elem228 = PartitionWithoutSD() + _elem228.read(iprot) + self.partitions.append(_elem228) iprot.readListEnd() else: iprot.skip(ftype) @@ -4570,8 +5126,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter222 in self.partitions: - iter222.write(oprot) + for iter229 in self.partitions: + iter229.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.sd is not None: @@ 
-4628,11 +5184,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype226, _size223) = iprot.readListBegin() - for _i227 in xrange(_size223): - _elem228 = Partition() - _elem228.read(iprot) - self.partitions.append(_elem228) + (_etype233, _size230) = iprot.readListBegin() + for _i234 in xrange(_size230): + _elem235 = Partition() + _elem235.read(iprot) + self.partitions.append(_elem235) iprot.readListEnd() else: iprot.skip(ftype) @@ -4649,8 +5205,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter229 in self.partitions: - iter229.write(oprot) + for iter236 in self.partitions: + iter236.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4684,6 +5240,7 @@ class PartitionSpec: - rootPath - sharedSDPartitionSpec - partitionList + - catName """ thrift_spec = ( @@ -4693,14 +5250,16 @@ class PartitionSpec: (3, TType.STRING, 'rootPath', None, None, ), # 3 (4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None,): + def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,): self.dbName = dbName self.tableName = tableName self.rootPath = rootPath self.sharedSDPartitionSpec = sharedSDPartitionSpec self.partitionList = partitionList + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4738,6 +5297,11 @@ def read(self, iprot): self.partitionList.read(iprot) else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4768,6 +5332,10 @@ def write(self, oprot): oprot.writeFieldBegin('partitionList', TType.STRUCT, 5) self.partitionList.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4782,6 +5350,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.rootPath) value = (value * 31) ^ hash(self.sharedSDPartitionSpec) value = (value * 31) ^ hash(self.partitionList) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6034,6 +6603,7 @@ class ColumnStatisticsDesc: - tableName - partName - lastAnalyzed + - catName """ thrift_spec = ( @@ -6043,14 +6613,16 @@ class ColumnStatisticsDesc: (3, TType.STRING, 'tableName', None, None, ), # 3 (4, TType.STRING, 'partName', None, None, ), # 4 (5, TType.I64, 'lastAnalyzed', None, None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, isTblLevel=None, dbName=None, tableName=None, partName=None, lastAnalyzed=None,): + def __init__(self, isTblLevel=None, dbName=None, tableName=None, partName=None, lastAnalyzed=None, catName=None,): self.isTblLevel = isTblLevel self.dbName = dbName self.tableName = tableName self.partName = partName 
self.lastAnalyzed = lastAnalyzed + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6086,6 +6658,11 @@ def read(self, iprot): self.lastAnalyzed = iprot.readI64() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6116,6 +6693,10 @@ def write(self, oprot): oprot.writeFieldBegin('lastAnalyzed', TType.I64, 5) oprot.writeI64(self.lastAnalyzed) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6136,6 +6717,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.partName) value = (value * 31) ^ hash(self.lastAnalyzed) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6184,11 +6766,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.statsObj = [] - (_etype233, _size230) = iprot.readListBegin() - for _i234 in xrange(_size230): - _elem235 = ColumnStatisticsObj() - _elem235.read(iprot) - self.statsObj.append(_elem235) + (_etype240, _size237) = iprot.readListBegin() + for _i241 in xrange(_size237): + _elem242 = ColumnStatisticsObj() + _elem242.read(iprot) + self.statsObj.append(_elem242) iprot.readListEnd() else: iprot.skip(ftype) @@ -6209,8 +6791,8 @@ def write(self, oprot): if self.statsObj is not None: oprot.writeFieldBegin('statsObj', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.statsObj)) - for iter236 in self.statsObj: - iter236.write(oprot) + for iter243 in self.statsObj: + iter243.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6270,11 +6852,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype240, _size237) = iprot.readListBegin() - for _i241 in xrange(_size237): - _elem242 = ColumnStatisticsObj() - _elem242.read(iprot) - self.colStats.append(_elem242) + (_etype247, _size244) = iprot.readListBegin() + for _i248 in xrange(_size244): + _elem249 = ColumnStatisticsObj() + _elem249.read(iprot) + self.colStats.append(_elem249) iprot.readListEnd() else: iprot.skip(ftype) @@ -6296,8 +6878,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter243 in self.colStats: - iter243.write(oprot) + for iter250 in self.colStats: + iter250.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.partsFound is not None: @@ -6361,11 +6943,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype247, _size244) = iprot.readListBegin() - for _i248 in xrange(_size244): - _elem249 = ColumnStatistics() - _elem249.read(iprot) - self.colStats.append(_elem249) + (_etype254, _size251) = iprot.readListBegin() + for _i255 in xrange(_size251): + _elem256 = ColumnStatistics() + _elem256.read(iprot) + self.colStats.append(_elem256) iprot.readListEnd() else: iprot.skip(ftype) @@ -6387,8 +6969,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter250 in self.colStats: - iter250.write(oprot) + for 
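ColumnStatisticsDesc gains catName as optional STRING field 6, so column statistics become addressable per catalog rather than only per database and table, with the same read/skip/write/hash pattern as Table and Partition. A sketch of a table-level stats descriptor under an explicit catalog; the values are illustrative:

    from hive_metastore.ttypes import ColumnStatisticsDesc  # assumed module path

    desc = ColumnStatisticsDesc(isTblLevel=True, dbName='default', tableName='t1',
                                lastAnalyzed=1522083600, catName='hive')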
iter257 in self.colStats: + iter257.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.needMerge is not None: @@ -6450,22 +7032,22 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype254, _size251) = iprot.readListBegin() - for _i255 in xrange(_size251): - _elem256 = FieldSchema() - _elem256.read(iprot) - self.fieldSchemas.append(_elem256) + (_etype261, _size258) = iprot.readListBegin() + for _i262 in xrange(_size258): + _elem263 = FieldSchema() + _elem263.read(iprot) + self.fieldSchemas.append(_elem263) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - (_ktype258, _vtype259, _size257 ) = iprot.readMapBegin() - for _i261 in xrange(_size257): - _key262 = iprot.readString() - _val263 = iprot.readString() - self.properties[_key262] = _val263 + (_ktype265, _vtype266, _size264 ) = iprot.readMapBegin() + for _i268 in xrange(_size264): + _key269 = iprot.readString() + _val270 = iprot.readString() + self.properties[_key269] = _val270 iprot.readMapEnd() else: iprot.skip(ftype) @@ -6482,16 +7064,16 @@ def write(self, oprot): if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter264 in self.fieldSchemas: - iter264.write(oprot) + for iter271 in self.fieldSchemas: + iter271.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter265,viter266 in self.properties.items(): - oprot.writeString(kiter265) - oprot.writeString(viter266) + for kiter272,viter273 in self.properties.items(): + oprot.writeString(kiter272) + oprot.writeString(viter273) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6544,11 +7126,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.properties = {} - (_ktype268, _vtype269, _size267 ) = iprot.readMapBegin() - for _i271 in xrange(_size267): - _key272 = iprot.readString() - _val273 = iprot.readString() - self.properties[_key272] = _val273 + (_ktype275, _vtype276, _size274 ) = iprot.readMapBegin() + for _i278 in xrange(_size274): + _key279 = iprot.readString() + _val280 = iprot.readString() + self.properties[_key279] = _val280 iprot.readMapEnd() else: iprot.skip(ftype) @@ -6565,9 +7147,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter274,viter275 in self.properties.items(): - oprot.writeString(kiter274) - oprot.writeString(viter275) + for kiter281,viter282 in self.properties.items(): + oprot.writeString(kiter281) + oprot.writeString(viter282) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6598,17 +7180,20 @@ class PrimaryKeysRequest: Attributes: - db_name - tbl_name + - catName """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRING, 'catName', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, db_name=None, tbl_name=None, catName=None,): self.db_name = db_name self.tbl_name = tbl_name + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and 
fastbinary is not None: @@ -6629,6 +7214,11 @@ def read(self, iprot): self.tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6647,6 +7237,10 @@ def write(self, oprot): oprot.writeFieldBegin('tbl_name', TType.STRING, 2) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6662,6 +7256,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6701,11 +7296,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeys = [] - (_etype279, _size276) = iprot.readListBegin() - for _i280 in xrange(_size276): - _elem281 = SQLPrimaryKey() - _elem281.read(iprot) - self.primaryKeys.append(_elem281) + (_etype286, _size283) = iprot.readListBegin() + for _i287 in xrange(_size283): + _elem288 = SQLPrimaryKey() + _elem288.read(iprot) + self.primaryKeys.append(_elem288) iprot.readListEnd() else: iprot.skip(ftype) @@ -6722,8 +7317,8 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter282 in self.primaryKeys: - iter282.write(oprot) + for iter289 in self.primaryKeys: + iter289.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6758,6 +7353,7 @@ class ForeignKeysRequest: - parent_tbl_name - foreign_db_name - foreign_tbl_name + - catName """ thrift_spec = ( @@ -6766,13 +7362,15 @@ class ForeignKeysRequest: (2, TType.STRING, 'parent_tbl_name', None, None, ), # 2 (3, TType.STRING, 'foreign_db_name', None, None, ), # 3 (4, TType.STRING, 'foreign_tbl_name', None, None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, parent_db_name=None, parent_tbl_name=None, foreign_db_name=None, foreign_tbl_name=None,): + def __init__(self, parent_db_name=None, parent_tbl_name=None, foreign_db_name=None, foreign_tbl_name=None, catName=None,): self.parent_db_name = parent_db_name self.parent_tbl_name = parent_tbl_name self.foreign_db_name = foreign_db_name self.foreign_tbl_name = foreign_tbl_name + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6803,6 +7401,11 @@ def read(self, iprot): self.foreign_tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6829,6 +7432,10 @@ def write(self, oprot): oprot.writeFieldBegin('foreign_tbl_name', TType.STRING, 4) oprot.writeString(self.foreign_tbl_name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6842,6 +7449,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.parent_tbl_name) value = (value * 31) ^ hash(self.foreign_db_name) value = (value * 31) ^ hash(self.foreign_tbl_name) + value = 
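PrimaryKeysRequest (catName at id 3) and ForeignKeysRequest (catName at id 5) both append the catalog after their existing fields, so these two requests stay wire-compatible in both directions: an old client's bytes decode with catName = None, and an old server skips the trailing string. A sketch of catalog-qualified key lookups; only the struct fields come from this diff, the values are placeholders:

    from hive_metastore.ttypes import PrimaryKeysRequest, ForeignKeysRequest  # assumed module path

    pk_req = PrimaryKeysRequest(db_name='default', tbl_name='orders', catName='hive')
    fk_req = ForeignKeysRequest(parent_db_name='default', parent_tbl_name='orders',
                                foreign_db_name='default', foreign_tbl_name='order_items',
                                catName='hive')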
(value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6881,11 +7489,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeys = [] - (_etype286, _size283) = iprot.readListBegin() - for _i287 in xrange(_size283): - _elem288 = SQLForeignKey() - _elem288.read(iprot) - self.foreignKeys.append(_elem288) + (_etype293, _size290) = iprot.readListBegin() + for _i294 in xrange(_size290): + _elem295 = SQLForeignKey() + _elem295.read(iprot) + self.foreignKeys.append(_elem295) iprot.readListEnd() else: iprot.skip(ftype) @@ -6902,8 +7510,8 @@ def write(self, oprot): if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter289 in self.foreignKeys: - iter289.write(oprot) + for iter296 in self.foreignKeys: + iter296.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6934,17 +7542,20 @@ def __ne__(self, other): class UniqueConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -6959,11 +7570,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -6977,18 +7593,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('UniqueConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -6998,6 +7620,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7039,11 +7662,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype293, _size290) = iprot.readListBegin() - for _i294 in xrange(_size290): - _elem295 = SQLUniqueConstraint() - _elem295.read(iprot) - self.uniqueConstraints.append(_elem295) + (_etype300, _size297) = iprot.readListBegin() + for _i301 in xrange(_size297): + _elem302 = 
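UniqueConstraintsRequest is handled differently from the structs above: instead of appending, catName becomes field 1 while db_name and tbl_name shift to ids 2 and 3, and validate() now treats all three as required. Since all three are STRINGs, bytes written by a pre-catalog client are not rejected but silently re-slotted, so both ends presumably need the regenerated bindings together. The following sketch makes the re-slotting concrete (plain TBinaryProtocol, so the pure-Python read path shown above is used; the module path is an assumption):

    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    from thrift.Thrift import TType
    from hive_metastore.ttypes import UniqueConstraintsRequest  # assumed module path

    # emulate a pre-catalog writer: db_name at id 1, tbl_name at id 2
    buf = TTransport.TMemoryBuffer()
    oprot = TBinaryProtocol.TBinaryProtocol(buf)
    oprot.writeStructBegin('UniqueConstraintsRequest')
    oprot.writeFieldBegin('db_name', TType.STRING, 1); oprot.writeString('default'); oprot.writeFieldEnd()
    oprot.writeFieldBegin('tbl_name', TType.STRING, 2); oprot.writeString('t1'); oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

    req = UniqueConstraintsRequest()
    req.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    print(req.catName, req.db_name, req.tbl_name)  # ('default', 't1', None): old ids land in new slots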
SQLUniqueConstraint() + _elem302.read(iprot) + self.uniqueConstraints.append(_elem302) iprot.readListEnd() else: iprot.skip(ftype) @@ -7060,8 +7683,8 @@ def write(self, oprot): if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter296 in self.uniqueConstraints: - iter296.write(oprot) + for iter303 in self.uniqueConstraints: + iter303.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7092,17 +7715,20 @@ def __ne__(self, other): class NotNullConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -7117,11 +7743,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -7135,18 +7766,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('NotNullConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -7156,6 +7793,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7197,11 +7835,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype300, _size297) = iprot.readListBegin() - for _i301 in xrange(_size297): - _elem302 = SQLNotNullConstraint() - _elem302.read(iprot) - self.notNullConstraints.append(_elem302) + (_etype307, _size304) = iprot.readListBegin() + for _i308 in xrange(_size304): + _elem309 = SQLNotNullConstraint() + _elem309.read(iprot) + self.notNullConstraints.append(_elem309) iprot.readListEnd() else: iprot.skip(ftype) @@ -7218,8 +7856,8 @@ def write(self, oprot): if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter303 in self.notNullConstraints: - 
iter303.write(oprot) + for iter310 in self.notNullConstraints: + iter310.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7250,17 +7888,20 @@ def __ne__(self, other): class DefaultConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -7275,11 +7916,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -7293,18 +7939,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('DefaultConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -7314,6 +7966,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7355,11 +8008,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype307, _size304) = iprot.readListBegin() - for _i308 in xrange(_size304): - _elem309 = SQLDefaultConstraint() - _elem309.read(iprot) - self.defaultConstraints.append(_elem309) + (_etype314, _size311) = iprot.readListBegin() + for _i315 in xrange(_size311): + _elem316 = SQLDefaultConstraint() + _elem316.read(iprot) + self.defaultConstraints.append(_elem316) iprot.readListEnd() else: iprot.skip(ftype) @@ -7376,8 +8029,8 @@ def write(self, oprot): if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter310 in self.defaultConstraints: - iter310.write(oprot) + for iter317 in self.defaultConstraints: + iter317.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7408,17 +8061,20 @@ def __ne__(self, other): class CheckConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 
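NotNullConstraintsRequest and DefaultConstraintsRequest (and CheckConstraintsRequest just below) get the identical treatment: catName required at id 1, db_name and tbl_name shifted down one slot, and a validate() that refuses unset fields. A sketch of the failure mode, assuming the same module path as above:

    from thrift.protocol import TProtocol
    from hive_metastore.ttypes import NotNullConstraintsRequest  # assumed module path

    req = NotNullConstraintsRequest(db_name='default', tbl_name='t1')  # catName left unset
    try:
        req.validate()
    except TProtocol.TProtocolException as e:
        print(e)  # Required field catName is unset!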
'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -7433,11 +8089,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -7451,18 +8112,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CheckConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -7472,6 +8139,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7513,11 +8181,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraints = [] - (_etype314, _size311) = iprot.readListBegin() - for _i315 in xrange(_size311): - _elem316 = SQLCheckConstraint() - _elem316.read(iprot) - self.checkConstraints.append(_elem316) + (_etype321, _size318) = iprot.readListBegin() + for _i322 in xrange(_size318): + _elem323 = SQLCheckConstraint() + _elem323.read(iprot) + self.checkConstraints.append(_elem323) iprot.readListEnd() else: iprot.skip(ftype) @@ -7534,8 +8202,8 @@ def write(self, oprot): if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter317 in self.checkConstraints: - iter317.write(oprot) + for iter324 in self.checkConstraints: + iter324.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7569,6 +8237,7 @@ class DropConstraintRequest: - dbname - tablename - constraintname + - catName """ thrift_spec = ( @@ -7576,12 +8245,14 @@ class DropConstraintRequest: (1, TType.STRING, 'dbname', None, None, ), # 1 (2, TType.STRING, 'tablename', None, None, ), # 2 (3, TType.STRING, 'constraintname', None, None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbname=None, tablename=None, constraintname=None,): + def __init__(self, dbname=None, tablename=None, constraintname=None, catName=None,): self.dbname = dbname self.tablename = tablename self.constraintname = constraintname + self.catName = 
catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7607,6 +8278,11 @@ def read(self, iprot): self.constraintname = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7629,6 +8305,10 @@ def write(self, oprot): oprot.writeFieldBegin('constraintname', TType.STRING, 3) oprot.writeString(self.constraintname) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7647,6 +8327,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tablename) value = (value * 31) ^ hash(self.constraintname) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -7686,11 +8367,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.primaryKeyCols = [] - (_etype321, _size318) = iprot.readListBegin() - for _i322 in xrange(_size318): - _elem323 = SQLPrimaryKey() - _elem323.read(iprot) - self.primaryKeyCols.append(_elem323) + (_etype328, _size325) = iprot.readListBegin() + for _i329 in xrange(_size325): + _elem330 = SQLPrimaryKey() + _elem330.read(iprot) + self.primaryKeyCols.append(_elem330) iprot.readListEnd() else: iprot.skip(ftype) @@ -7707,8 +8388,8 @@ def write(self, oprot): if self.primaryKeyCols is not None: oprot.writeFieldBegin('primaryKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeyCols)) - for iter324 in self.primaryKeyCols: - iter324.write(oprot) + for iter331 in self.primaryKeyCols: + iter331.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7762,11 +8443,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.foreignKeyCols = [] - (_etype328, _size325) = iprot.readListBegin() - for _i329 in xrange(_size325): - _elem330 = SQLForeignKey() - _elem330.read(iprot) - self.foreignKeyCols.append(_elem330) + (_etype335, _size332) = iprot.readListBegin() + for _i336 in xrange(_size332): + _elem337 = SQLForeignKey() + _elem337.read(iprot) + self.foreignKeyCols.append(_elem337) iprot.readListEnd() else: iprot.skip(ftype) @@ -7783,8 +8464,8 @@ def write(self, oprot): if self.foreignKeyCols is not None: oprot.writeFieldBegin('foreignKeyCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeyCols)) - for iter331 in self.foreignKeyCols: - iter331.write(oprot) + for iter338 in self.foreignKeyCols: + iter338.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7838,11 +8519,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.uniqueConstraintCols = [] - (_etype335, _size332) = iprot.readListBegin() - for _i336 in xrange(_size332): - _elem337 = SQLUniqueConstraint() - _elem337.read(iprot) - self.uniqueConstraintCols.append(_elem337) + (_etype342, _size339) = iprot.readListBegin() + for _i343 in xrange(_size339): + _elem344 = SQLUniqueConstraint() + _elem344.read(iprot) + self.uniqueConstraintCols.append(_elem344) iprot.readListEnd() else: iprot.skip(ftype) @@ -7859,8 +8540,8 @@ def write(self, oprot): if self.uniqueConstraintCols is not None: oprot.writeFieldBegin('uniqueConstraintCols', TType.LIST, 1) 
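DropConstraintRequest, by contrast, takes the non-breaking route again: catName is appended as optional field 4 after constraintname, and the Add*Request constraint-column structs around it only see the mechanical temp renames. Illustrative use with placeholder names:

    from hive_metastore.ttypes import DropConstraintRequest  # assumed module path

    req = DropConstraintRequest(dbname='default', tablename='orders',
                                constraintname='pk_orders', catName='hive')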
oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraintCols)) - for iter338 in self.uniqueConstraintCols: - iter338.write(oprot) + for iter345 in self.uniqueConstraintCols: + iter345.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7914,11 +8595,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.notNullConstraintCols = [] - (_etype342, _size339) = iprot.readListBegin() - for _i343 in xrange(_size339): - _elem344 = SQLNotNullConstraint() - _elem344.read(iprot) - self.notNullConstraintCols.append(_elem344) + (_etype349, _size346) = iprot.readListBegin() + for _i350 in xrange(_size346): + _elem351 = SQLNotNullConstraint() + _elem351.read(iprot) + self.notNullConstraintCols.append(_elem351) iprot.readListEnd() else: iprot.skip(ftype) @@ -7935,8 +8616,8 @@ def write(self, oprot): if self.notNullConstraintCols is not None: oprot.writeFieldBegin('notNullConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraintCols)) - for iter345 in self.notNullConstraintCols: - iter345.write(oprot) + for iter352 in self.notNullConstraintCols: + iter352.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7990,11 +8671,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.defaultConstraintCols = [] - (_etype349, _size346) = iprot.readListBegin() - for _i350 in xrange(_size346): - _elem351 = SQLDefaultConstraint() - _elem351.read(iprot) - self.defaultConstraintCols.append(_elem351) + (_etype356, _size353) = iprot.readListBegin() + for _i357 in xrange(_size353): + _elem358 = SQLDefaultConstraint() + _elem358.read(iprot) + self.defaultConstraintCols.append(_elem358) iprot.readListEnd() else: iprot.skip(ftype) @@ -8011,8 +8692,8 @@ def write(self, oprot): if self.defaultConstraintCols is not None: oprot.writeFieldBegin('defaultConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraintCols)) - for iter352 in self.defaultConstraintCols: - iter352.write(oprot) + for iter359 in self.defaultConstraintCols: + iter359.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8066,11 +8747,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.checkConstraintCols = [] - (_etype356, _size353) = iprot.readListBegin() - for _i357 in xrange(_size353): - _elem358 = SQLCheckConstraint() - _elem358.read(iprot) - self.checkConstraintCols.append(_elem358) + (_etype363, _size360) = iprot.readListBegin() + for _i364 in xrange(_size360): + _elem365 = SQLCheckConstraint() + _elem365.read(iprot) + self.checkConstraintCols.append(_elem365) iprot.readListEnd() else: iprot.skip(ftype) @@ -8087,8 +8768,8 @@ def write(self, oprot): if self.checkConstraintCols is not None: oprot.writeFieldBegin('checkConstraintCols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraintCols)) - for iter359 in self.checkConstraintCols: - iter359.write(oprot) + for iter366 in self.checkConstraintCols: + iter366.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8145,11 +8826,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype363, _size360) = iprot.readListBegin() - for _i364 in xrange(_size360): - _elem365 = Partition() - _elem365.read(iprot) - self.partitions.append(_elem365) + (_etype370, _size367) = iprot.readListBegin() + for _i371 in xrange(_size367): + _elem372 = Partition() + _elem372.read(iprot) + self.partitions.append(_elem372) 
iprot.readListEnd() else: iprot.skip(ftype) @@ -8171,8 +8852,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter366 in self.partitions: - iter366.write(oprot) + for iter373 in self.partitions: + iter373.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -8215,6 +8896,7 @@ class PartitionsByExprRequest: - expr - defaultPartitionName - maxParts + - catName """ thrift_spec = ( @@ -8224,14 +8906,16 @@ class PartitionsByExprRequest: (3, TType.STRING, 'expr', None, None, ), # 3 (4, TType.STRING, 'defaultPartitionName', None, None, ), # 4 (5, TType.I16, 'maxParts', None, -1, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4],): + def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None,): self.dbName = dbName self.tblName = tblName self.expr = expr self.defaultPartitionName = defaultPartitionName self.maxParts = maxParts + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8267,6 +8951,11 @@ def read(self, iprot): self.maxParts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8297,6 +8986,10 @@ def write(self, oprot): oprot.writeFieldBegin('maxParts', TType.I16, 5) oprot.writeI16(self.maxParts) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8317,6 +9010,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.expr) value = (value * 31) ^ hash(self.defaultPartitionName) value = (value * 31) ^ hash(self.maxParts) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8356,11 +9050,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tableStats = [] - (_etype370, _size367) = iprot.readListBegin() - for _i371 in xrange(_size367): - _elem372 = ColumnStatisticsObj() - _elem372.read(iprot) - self.tableStats.append(_elem372) + (_etype377, _size374) = iprot.readListBegin() + for _i378 in xrange(_size374): + _elem379 = ColumnStatisticsObj() + _elem379.read(iprot) + self.tableStats.append(_elem379) iprot.readListEnd() else: iprot.skip(ftype) @@ -8377,8 +9071,8 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter373 in self.tableStats: - iter373.write(oprot) + for iter380 in self.tableStats: + iter380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8432,17 +9126,17 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype375, _vtype376, _size374 ) = iprot.readMapBegin() - for _i378 in xrange(_size374): - _key379 = iprot.readString() - _val380 = [] - (_etype384, _size381) = iprot.readListBegin() - for _i385 in xrange(_size381): - _elem386 = ColumnStatisticsObj() - _elem386.read(iprot) - _val380.append(_elem386) + 
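PartitionsByExprRequest likewise appends catName as optional STRING field 6 behind maxParts, leaving the serialized filter expression untouched. A sketch in which the expr bytes are a placeholder, not a real serialized filter:

    from hive_metastore.ttypes import PartitionsByExprRequest  # assumed module path

    req = PartitionsByExprRequest(dbName='default', tblName='orders',
                                  expr='...placeholder filter bytes...',
                                  defaultPartitionName='__HIVE_DEFAULT_PARTITION__',
                                  maxParts=-1, catName='hive')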
(_ktype382, _vtype383, _size381 ) = iprot.readMapBegin() + for _i385 in xrange(_size381): + _key386 = iprot.readString() + _val387 = [] + (_etype391, _size388) = iprot.readListBegin() + for _i392 in xrange(_size388): + _elem393 = ColumnStatisticsObj() + _elem393.read(iprot) + _val387.append(_elem393) iprot.readListEnd() - self.partStats[_key379] = _val380 + self.partStats[_key386] = _val387 iprot.readMapEnd() else: iprot.skip(ftype) @@ -8459,11 +9153,11 @@ def write(self, oprot): if self.partStats is not None: oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter387,viter388 in self.partStats.items(): - oprot.writeString(kiter387) - oprot.writeListBegin(TType.STRUCT, len(viter388)) - for iter389 in viter388: - iter389.write(oprot) + for kiter394,viter395 in self.partStats.items(): + oprot.writeString(kiter394) + oprot.writeListBegin(TType.STRUCT, len(viter395)) + for iter396 in viter395: + iter396.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -8498,6 +9192,7 @@ class TableStatsRequest: - dbName - tblName - colNames + - catName """ thrift_spec = ( @@ -8505,12 +9200,14 @@ class TableStatsRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblName=None, colNames=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8534,13 +9231,18 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype393, _size390) = iprot.readListBegin() - for _i394 in xrange(_size390): - _elem395 = iprot.readString() - self.colNames.append(_elem395) + (_etype400, _size397) = iprot.readListBegin() + for _i401 in xrange(_size397): + _elem402 = iprot.readString() + self.colNames.append(_elem402) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8562,10 +9264,14 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter396 in self.colNames: - oprot.writeString(iter396) + for iter403 in self.colNames: + oprot.writeString(iter403) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8584,6 +9290,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.colNames) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8604,6 +9311,7 @@ class PartitionsStatsRequest: - tblName - colNames - partNames + - catName """ thrift_spec = ( @@ -8612,13 +9320,15 @@ class PartitionsStatsRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.LIST, 
'partNames', (TType.STRING,None), None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.partNames = partNames + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8642,23 +9352,28 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype400, _size397) = iprot.readListBegin() - for _i401 in xrange(_size397): - _elem402 = iprot.readString() - self.colNames.append(_elem402) + (_etype407, _size404) = iprot.readListBegin() + for _i408 in xrange(_size404): + _elem409 = iprot.readString() + self.colNames.append(_elem409) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.partNames = [] - (_etype406, _size403) = iprot.readListBegin() - for _i407 in xrange(_size403): - _elem408 = iprot.readString() - self.partNames.append(_elem408) + (_etype413, _size410) = iprot.readListBegin() + for _i414 in xrange(_size410): + _elem415 = iprot.readString() + self.partNames.append(_elem415) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8680,17 +9395,21 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter409 in self.colNames: - oprot.writeString(iter409) + for iter416 in self.colNames: + oprot.writeString(iter416) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter410 in self.partNames: - oprot.writeString(iter410) + for iter417 in self.partNames: + oprot.writeString(iter417) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8712,6 +9431,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.partNames) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8751,11 +9471,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype414, _size411) = iprot.readListBegin() - for _i415 in xrange(_size411): - _elem416 = Partition() - _elem416.read(iprot) - self.partitions.append(_elem416) + (_etype421, _size418) = iprot.readListBegin() + for _i422 in xrange(_size418): + _elem423 = Partition() + _elem423.read(iprot) + self.partitions.append(_elem423) iprot.readListEnd() else: iprot.skip(ftype) @@ -8772,8 +9492,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter417 in self.partitions: - iter417.write(oprot) + for iter424 in self.partitions: + iter424.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8807,6 +9527,7 @@ class 
AddPartitionsRequest: - parts - ifNotExists - needResult + - catName """ thrift_spec = ( @@ -8816,14 +9537,16 @@ class AddPartitionsRequest: (3, TType.LIST, 'parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 (4, TType.BOOL, 'ifNotExists', None, None, ), # 4 (5, TType.BOOL, 'needResult', None, True, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4],): + def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,): self.dbName = dbName self.tblName = tblName self.parts = parts self.ifNotExists = ifNotExists self.needResult = needResult + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8847,11 +9570,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype421, _size418) = iprot.readListBegin() - for _i422 in xrange(_size418): - _elem423 = Partition() - _elem423.read(iprot) - self.parts.append(_elem423) + (_etype428, _size425) = iprot.readListBegin() + for _i429 in xrange(_size425): + _elem430 = Partition() + _elem430.read(iprot) + self.parts.append(_elem430) iprot.readListEnd() else: iprot.skip(ftype) @@ -8865,6 +9588,11 @@ def read(self, iprot): self.needResult = iprot.readBool() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8886,8 +9614,8 @@ def write(self, oprot): if self.parts is not None: oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter424 in self.parts: - iter424.write(oprot) + for iter431 in self.parts: + iter431.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -8898,6 +9626,10 @@ def write(self, oprot): oprot.writeFieldBegin('needResult', TType.BOOL, 5) oprot.writeBool(self.needResult) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8920,6 +9652,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.parts) value = (value * 31) ^ hash(self.ifNotExists) value = (value * 31) ^ hash(self.needResult) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8958,12 +9691,12 @@ def read(self, iprot): break if fid == 1: if ftype == TType.LIST: - self.partitions = [] - (_etype428, _size425) = iprot.readListBegin() - for _i429 in xrange(_size425): - _elem430 = Partition() - _elem430.read(iprot) - self.partitions.append(_elem430) + self.partitions = [] + (_etype435, _size432) = iprot.readListBegin() + for _i436 in xrange(_size432): + _elem437 = Partition() + _elem437.read(iprot) + self.partitions.append(_elem437) iprot.readListEnd() else: iprot.skip(ftype) @@ -8980,8 +9713,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter431 in self.partitions: - iter431.write(oprot) + for iter438 in self.partitions: + iter438.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9116,21 +9849,21 @@ def read(self, 
iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype435, _size432) = iprot.readListBegin() - for _i436 in xrange(_size432): - _elem437 = iprot.readString() - self.names.append(_elem437) + (_etype442, _size439) = iprot.readListBegin() + for _i443 in xrange(_size439): + _elem444 = iprot.readString() + self.names.append(_elem444) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype441, _size438) = iprot.readListBegin() - for _i442 in xrange(_size438): - _elem443 = DropPartitionsExpr() - _elem443.read(iprot) - self.exprs.append(_elem443) + (_etype448, _size445) = iprot.readListBegin() + for _i449 in xrange(_size445): + _elem450 = DropPartitionsExpr() + _elem450.read(iprot) + self.exprs.append(_elem450) iprot.readListEnd() else: iprot.skip(ftype) @@ -9147,15 +9880,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter444 in self.names: - oprot.writeString(iter444) + for iter451 in self.names: + oprot.writeString(iter451) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter445 in self.exprs: - iter445.write(oprot) + for iter452 in self.exprs: + iter452.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9193,6 +9926,7 @@ class DropPartitionsRequest: - ignoreProtection - environmentContext - needResult + - catName """ thrift_spec = ( @@ -9205,9 +9939,10 @@ class DropPartitionsRequest: (6, TType.BOOL, 'ignoreProtection', None, None, ), # 6 (7, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 7 (8, TType.BOOL, 'needResult', None, True, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4],): + def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4], catName=None,): self.dbName = dbName self.tblName = tblName self.parts = parts @@ -9216,6 +9951,7 @@ def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExi self.ignoreProtection = ignoreProtection self.environmentContext = environmentContext self.needResult = needResult + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9268,6 +10004,11 @@ def read(self, iprot): self.needResult = iprot.readBool() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9310,6 +10051,10 @@ def write(self, oprot): oprot.writeFieldBegin('needResult', TType.BOOL, 8) oprot.writeBool(self.needResult) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9333,6 +10078,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.ignoreProtection) value = (value * 31) ^ hash(self.environmentContext) 
value = (value * 31) ^ hash(self.needResult) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -9357,6 +10103,7 @@ class PartitionValuesRequest: - partitionOrder - ascending - maxParts + - catName """ thrift_spec = ( @@ -9369,9 +10116,10 @@ class PartitionValuesRequest: (6, TType.LIST, 'partitionOrder', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 6 (7, TType.BOOL, 'ascending', None, True, ), # 7 (8, TType.I64, 'maxParts', None, -1, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4],): + def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None,): self.dbName = dbName self.tblName = tblName self.partitionKeys = partitionKeys @@ -9380,6 +10128,7 @@ def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct= self.partitionOrder = partitionOrder self.ascending = ascending self.maxParts = maxParts + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9403,11 +10152,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partitionKeys = [] - (_etype449, _size446) = iprot.readListBegin() - for _i450 in xrange(_size446): - _elem451 = FieldSchema() - _elem451.read(iprot) - self.partitionKeys.append(_elem451) + (_etype456, _size453) = iprot.readListBegin() + for _i457 in xrange(_size453): + _elem458 = FieldSchema() + _elem458.read(iprot) + self.partitionKeys.append(_elem458) iprot.readListEnd() else: iprot.skip(ftype) @@ -9424,11 +10173,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.partitionOrder = [] - (_etype455, _size452) = iprot.readListBegin() - for _i456 in xrange(_size452): - _elem457 = FieldSchema() - _elem457.read(iprot) - self.partitionOrder.append(_elem457) + (_etype462, _size459) = iprot.readListBegin() + for _i463 in xrange(_size459): + _elem464 = FieldSchema() + _elem464.read(iprot) + self.partitionOrder.append(_elem464) iprot.readListEnd() else: iprot.skip(ftype) @@ -9442,6 +10191,11 @@ def read(self, iprot): self.maxParts = iprot.readI64() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9463,8 +10217,8 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter458 in self.partitionKeys: - iter458.write(oprot) + for iter465 in self.partitionKeys: + iter465.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.applyDistinct is not None: @@ -9478,8 +10232,8 @@ def write(self, oprot): if self.partitionOrder is not None: oprot.writeFieldBegin('partitionOrder', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder)) - for iter459 in self.partitionOrder: - iter459.write(oprot) + for iter466 in self.partitionOrder: + iter466.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ascending is not None: @@ -9490,6 +10244,10 @@ def write(self, oprot): 
oprot.writeFieldBegin('maxParts', TType.I64, 8) oprot.writeI64(self.maxParts) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9513,6 +10271,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.partitionOrder) value = (value * 31) ^ hash(self.ascending) value = (value * 31) ^ hash(self.maxParts) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -9552,10 +10311,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.row = [] - (_etype463, _size460) = iprot.readListBegin() - for _i464 in xrange(_size460): - _elem465 = iprot.readString() - self.row.append(_elem465) + (_etype470, _size467) = iprot.readListBegin() + for _i471 in xrange(_size467): + _elem472 = iprot.readString() + self.row.append(_elem472) iprot.readListEnd() else: iprot.skip(ftype) @@ -9572,8 +10331,8 @@ def write(self, oprot): if self.row is not None: oprot.writeFieldBegin('row', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.row)) - for iter466 in self.row: - oprot.writeString(iter466) + for iter473 in self.row: + oprot.writeString(iter473) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9627,11 +10386,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitionValues = [] - (_etype470, _size467) = iprot.readListBegin() - for _i471 in xrange(_size467): - _elem472 = PartitionValuesRow() - _elem472.read(iprot) - self.partitionValues.append(_elem472) + (_etype477, _size474) = iprot.readListBegin() + for _i478 in xrange(_size474): + _elem479 = PartitionValuesRow() + _elem479.read(iprot) + self.partitionValues.append(_elem479) iprot.readListEnd() else: iprot.skip(ftype) @@ -9648,8 +10407,8 @@ def write(self, oprot): if self.partitionValues is not None: oprot.writeFieldBegin('partitionValues', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitionValues)) - for iter473 in self.partitionValues: - iter473.write(oprot) + for iter480 in self.partitionValues: + iter480.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9766,6 +10525,7 @@ class Function: - createTime - functionType - resourceUris + - catName """ thrift_spec = ( @@ -9778,9 +10538,10 @@ class Function: (6, TType.I32, 'createTime', None, None, ), # 6 (7, TType.I32, 'functionType', None, None, ), # 7 (8, TType.LIST, 'resourceUris', (TType.STRUCT,(ResourceUri, ResourceUri.thrift_spec)), None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, functionName=None, dbName=None, className=None, ownerName=None, ownerType=None, createTime=None, functionType=None, resourceUris=None,): + def __init__(self, functionName=None, dbName=None, className=None, ownerName=None, ownerType=None, createTime=None, functionType=None, resourceUris=None, catName=None,): self.functionName = functionName self.dbName = dbName self.className = className @@ -9789,6 +10550,7 @@ def __init__(self, functionName=None, dbName=None, className=None, ownerName=Non self.createTime = createTime self.functionType = functionType self.resourceUris = resourceUris + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9837,14 +10599,19 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: 
self.resourceUris = [] - (_etype477, _size474) = iprot.readListBegin() - for _i478 in xrange(_size474): - _elem479 = ResourceUri() - _elem479.read(iprot) - self.resourceUris.append(_elem479) + (_etype484, _size481) = iprot.readListBegin() + for _i485 in xrange(_size481): + _elem486 = ResourceUri() + _elem486.read(iprot) + self.resourceUris.append(_elem486) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9886,10 +10653,14 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter480 in self.resourceUris: - iter480.write(oprot) + for iter487 in self.resourceUris: + iter487.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9907,6 +10678,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.createTime) value = (value * 31) ^ hash(self.functionType) value = (value * 31) ^ hash(self.resourceUris) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -10131,11 +10903,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype484, _size481) = iprot.readListBegin() - for _i485 in xrange(_size481): - _elem486 = TxnInfo() - _elem486.read(iprot) - self.open_txns.append(_elem486) + (_etype491, _size488) = iprot.readListBegin() + for _i492 in xrange(_size488): + _elem493 = TxnInfo() + _elem493.read(iprot) + self.open_txns.append(_elem493) iprot.readListEnd() else: iprot.skip(ftype) @@ -10156,8 +10928,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter487 in self.open_txns: - iter487.write(oprot) + for iter494 in self.open_txns: + iter494.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10228,10 +11000,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype491, _size488) = iprot.readListBegin() - for _i492 in xrange(_size488): - _elem493 = iprot.readI64() - self.open_txns.append(_elem493) + (_etype498, _size495) = iprot.readListBegin() + for _i499 in xrange(_size495): + _elem500 = iprot.readI64() + self.open_txns.append(_elem500) iprot.readListEnd() else: iprot.skip(ftype) @@ -10262,8 +11034,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.I64, len(self.open_txns)) - for iter494 in self.open_txns: - oprot.writeI64(iter494) + for iter501 in self.open_txns: + oprot.writeI64(iter501) oprot.writeListEnd() oprot.writeFieldEnd() if self.min_open_txn is not None: @@ -10442,10 +11214,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype498, _size495) = iprot.readListBegin() - for _i499 in xrange(_size495): - _elem500 = iprot.readI64() - self.txn_ids.append(_elem500) + (_etype505, _size502) = iprot.readListBegin() + for _i506 in xrange(_size502): + _elem507 = iprot.readI64() + self.txn_ids.append(_elem507) iprot.readListEnd() else: iprot.skip(ftype) @@ -10462,8 +11234,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', 
TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter501 in self.txn_ids: - oprot.writeI64(iter501) + for iter508 in self.txn_ids: + oprot.writeI64(iter508) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10584,10 +11356,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype505, _size502) = iprot.readListBegin() - for _i506 in xrange(_size502): - _elem507 = iprot.readI64() - self.txn_ids.append(_elem507) + (_etype512, _size509) = iprot.readListBegin() + for _i513 in xrange(_size509): + _elem514 = iprot.readI64() + self.txn_ids.append(_elem514) iprot.readListEnd() else: iprot.skip(ftype) @@ -10604,8 +11376,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter508 in self.txn_ids: - oprot.writeI64(iter508) + for iter515 in self.txn_ids: + oprot.writeI64(iter515) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10729,10 +11501,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fullTableNames = [] - (_etype512, _size509) = iprot.readListBegin() - for _i513 in xrange(_size509): - _elem514 = iprot.readString() - self.fullTableNames.append(_elem514) + (_etype519, _size516) = iprot.readListBegin() + for _i520 in xrange(_size516): + _elem521 = iprot.readString() + self.fullTableNames.append(_elem521) iprot.readListEnd() else: iprot.skip(ftype) @@ -10754,8 +11526,8 @@ def write(self, oprot): if self.fullTableNames is not None: oprot.writeFieldBegin('fullTableNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fullTableNames)) - for iter515 in self.fullTableNames: - oprot.writeString(iter515) + for iter522 in self.fullTableNames: + oprot.writeString(iter522) oprot.writeListEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -10838,10 +11610,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.invalidWriteIds = [] - (_etype519, _size516) = iprot.readListBegin() - for _i520 in xrange(_size516): - _elem521 = iprot.readI64() - self.invalidWriteIds.append(_elem521) + (_etype526, _size523) = iprot.readListBegin() + for _i527 in xrange(_size523): + _elem528 = iprot.readI64() + self.invalidWriteIds.append(_elem528) iprot.readListEnd() else: iprot.skip(ftype) @@ -10876,8 +11648,8 @@ def write(self, oprot): if self.invalidWriteIds is not None: oprot.writeFieldBegin('invalidWriteIds', TType.LIST, 3) oprot.writeListBegin(TType.I64, len(self.invalidWriteIds)) - for iter522 in self.invalidWriteIds: - oprot.writeI64(iter522) + for iter529 in self.invalidWriteIds: + oprot.writeI64(iter529) oprot.writeListEnd() oprot.writeFieldEnd() if self.minOpenWriteId is not None: @@ -10949,11 +11721,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tblValidWriteIds = [] - (_etype526, _size523) = iprot.readListBegin() - for _i527 in xrange(_size523): - _elem528 = TableValidWriteIds() - _elem528.read(iprot) - self.tblValidWriteIds.append(_elem528) + (_etype533, _size530) = iprot.readListBegin() + for _i534 in xrange(_size530): + _elem535 = TableValidWriteIds() + _elem535.read(iprot) + self.tblValidWriteIds.append(_elem535) iprot.readListEnd() else: iprot.skip(ftype) @@ -10970,8 +11742,8 @@ def write(self, oprot): if self.tblValidWriteIds is not None: oprot.writeFieldBegin('tblValidWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tblValidWriteIds)) - for iter529 in self.tblValidWriteIds: - iter529.write(oprot) 
+ for iter536 in self.tblValidWriteIds: + iter536.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11031,10 +11803,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnIds = [] - (_etype533, _size530) = iprot.readListBegin() - for _i534 in xrange(_size530): - _elem535 = iprot.readI64() - self.txnIds.append(_elem535) + (_etype540, _size537) = iprot.readListBegin() + for _i541 in xrange(_size537): + _elem542 = iprot.readI64() + self.txnIds.append(_elem542) iprot.readListEnd() else: iprot.skip(ftype) @@ -11061,8 +11833,8 @@ def write(self, oprot): if self.txnIds is not None: oprot.writeFieldBegin('txnIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txnIds)) - for iter536 in self.txnIds: - oprot.writeI64(iter536) + for iter543 in self.txnIds: + oprot.writeI64(iter543) oprot.writeListEnd() oprot.writeFieldEnd() if self.dbName is not None: @@ -11212,11 +11984,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txnToWriteIds = [] - (_etype540, _size537) = iprot.readListBegin() - for _i541 in xrange(_size537): - _elem542 = TxnToWriteId() - _elem542.read(iprot) - self.txnToWriteIds.append(_elem542) + (_etype547, _size544) = iprot.readListBegin() + for _i548 in xrange(_size544): + _elem549 = TxnToWriteId() + _elem549.read(iprot) + self.txnToWriteIds.append(_elem549) iprot.readListEnd() else: iprot.skip(ftype) @@ -11233,8 +12005,8 @@ def write(self, oprot): if self.txnToWriteIds is not None: oprot.writeFieldBegin('txnToWriteIds', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.txnToWriteIds)) - for iter543 in self.txnToWriteIds: - iter543.write(oprot) + for iter550 in self.txnToWriteIds: + iter550.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11462,11 +12234,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype547, _size544) = iprot.readListBegin() - for _i548 in xrange(_size544): - _elem549 = LockComponent() - _elem549.read(iprot) - self.component.append(_elem549) + (_etype554, _size551) = iprot.readListBegin() + for _i555 in xrange(_size551): + _elem556 = LockComponent() + _elem556.read(iprot) + self.component.append(_elem556) iprot.readListEnd() else: iprot.skip(ftype) @@ -11503,8 +12275,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter550 in self.component: - iter550.write(oprot) + for iter557 in self.component: + iter557.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -12202,11 +12974,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype554, _size551) = iprot.readListBegin() - for _i555 in xrange(_size551): - _elem556 = ShowLocksResponseElement() - _elem556.read(iprot) - self.locks.append(_elem556) + (_etype561, _size558) = iprot.readListBegin() + for _i562 in xrange(_size558): + _elem563 = ShowLocksResponseElement() + _elem563.read(iprot) + self.locks.append(_elem563) iprot.readListEnd() else: iprot.skip(ftype) @@ -12223,8 +12995,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter557 in self.locks: - iter557.write(oprot) + for iter564 in self.locks: + iter564.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12439,20 +13211,20 @@ def read(self, iprot): if fid == 1: if ftype == 
TType.SET: self.aborted = set() - (_etype561, _size558) = iprot.readSetBegin() - for _i562 in xrange(_size558): - _elem563 = iprot.readI64() - self.aborted.add(_elem563) + (_etype568, _size565) = iprot.readSetBegin() + for _i569 in xrange(_size565): + _elem570 = iprot.readI64() + self.aborted.add(_elem570) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype567, _size564) = iprot.readSetBegin() - for _i568 in xrange(_size564): - _elem569 = iprot.readI64() - self.nosuch.add(_elem569) + (_etype574, _size571) = iprot.readSetBegin() + for _i575 in xrange(_size571): + _elem576 = iprot.readI64() + self.nosuch.add(_elem576) iprot.readSetEnd() else: iprot.skip(ftype) @@ -12469,15 +13241,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter570 in self.aborted: - oprot.writeI64(iter570) + for iter577 in self.aborted: + oprot.writeI64(iter577) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter571 in self.nosuch: - oprot.writeI64(iter571) + for iter578 in self.nosuch: + oprot.writeI64(iter578) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -12574,11 +13346,11 @@ def read(self, iprot): elif fid == 6: if ftype == TType.MAP: self.properties = {} - (_ktype573, _vtype574, _size572 ) = iprot.readMapBegin() - for _i576 in xrange(_size572): - _key577 = iprot.readString() - _val578 = iprot.readString() - self.properties[_key577] = _val578 + (_ktype580, _vtype581, _size579 ) = iprot.readMapBegin() + for _i583 in xrange(_size579): + _key584 = iprot.readString() + _val585 = iprot.readString() + self.properties[_key584] = _val585 iprot.readMapEnd() else: iprot.skip(ftype) @@ -12615,9 +13387,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 6) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter579,viter580 in self.properties.items(): - oprot.writeString(kiter579) - oprot.writeString(viter580) + for kiter586,viter587 in self.properties.items(): + oprot.writeString(kiter586) + oprot.writeString(viter587) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13052,11 +13824,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype584, _size581) = iprot.readListBegin() - for _i585 in xrange(_size581): - _elem586 = ShowCompactResponseElement() - _elem586.read(iprot) - self.compacts.append(_elem586) + (_etype591, _size588) = iprot.readListBegin() + for _i592 in xrange(_size588): + _elem593 = ShowCompactResponseElement() + _elem593.read(iprot) + self.compacts.append(_elem593) iprot.readListEnd() else: iprot.skip(ftype) @@ -13073,8 +13845,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter587 in self.compacts: - iter587.write(oprot) + for iter594 in self.compacts: + iter594.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13163,10 +13935,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionnames = [] - (_etype591, _size588) = iprot.readListBegin() - for _i592 in xrange(_size588): - _elem593 = iprot.readString() - self.partitionnames.append(_elem593) + (_etype598, _size595) = 
iprot.readListBegin() + for _i599 in xrange(_size595): + _elem600 = iprot.readString() + self.partitionnames.append(_elem600) iprot.readListEnd() else: iprot.skip(ftype) @@ -13204,8 +13976,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter594 in self.partitionnames: - oprot.writeString(iter594) + for iter601 in self.partitionnames: + oprot.writeString(iter601) oprot.writeListEnd() oprot.writeFieldEnd() if self.operationType is not None: @@ -13385,6 +14157,7 @@ def __ne__(self, other): class CreationMetadata: """ Attributes: + - catName - dbName - tblName - tablesUsed @@ -13393,13 +14166,15 @@ class CreationMetadata: thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbName', None, None, ), # 1 - (2, TType.STRING, 'tblName', None, None, ), # 2 - (3, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 3 - (4, TType.STRING, 'validTxnList', None, None, ), # 4 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'tblName', None, None, ), # 3 + (4, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, dbName=None, tblName=None, tablesUsed=None, validTxnList=None,): + def __init__(self, catName=None, dbName=None, tblName=None, tablesUsed=None, validTxnList=None,): + self.catName = catName self.dbName = dbName self.tblName = tblName self.tablesUsed = tablesUsed @@ -13416,25 +14191,30 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.tblName = iprot.readString() + self.dbName = iprot.readString() else: iprot.skip(ftype) elif fid == 3: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: if ftype == TType.SET: self.tablesUsed = set() - (_etype598, _size595) = iprot.readSetBegin() - for _i599 in xrange(_size595): - _elem600 = iprot.readString() - self.tablesUsed.add(_elem600) + (_etype605, _size602) = iprot.readSetBegin() + for _i606 in xrange(_size602): + _elem607 = iprot.readString() + self.tablesUsed.add(_elem607) iprot.readSetEnd() else: iprot.skip(ftype) - elif fid == 4: + elif fid == 5: if ftype == TType.STRING: self.validTxnList = iprot.readString() else: @@ -13449,29 +14229,35 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CreationMetadata') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.tblName is not None: - oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeFieldBegin('tblName', TType.STRING, 3) oprot.writeString(self.tblName) oprot.writeFieldEnd() if self.tablesUsed is not None: - oprot.writeFieldBegin('tablesUsed', TType.SET, 3) + oprot.writeFieldBegin('tablesUsed', TType.SET, 4) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter601 in self.tablesUsed: - oprot.writeString(iter601) + for iter608 in self.tablesUsed: + oprot.writeString(iter608) oprot.writeSetEnd() oprot.writeFieldEnd() 
if self.validTxnList is not None: - oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) oprot.writeString(self.validTxnList) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.dbName is None: raise TProtocol.TProtocolException(message='Required field dbName is unset!') if self.tblName is None: @@ -13483,6 +14269,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.tablesUsed) @@ -13590,6 +14377,7 @@ class NotificationEvent: - tableName - message - messageFormat + - catName """ thrift_spec = ( @@ -13601,9 +14389,10 @@ class NotificationEvent: (5, TType.STRING, 'tableName', None, None, ), # 5 (6, TType.STRING, 'message', None, None, ), # 6 (7, TType.STRING, 'messageFormat', None, None, ), # 7 + (8, TType.STRING, 'catName', None, None, ), # 8 ) - def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, tableName=None, message=None, messageFormat=None,): + def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, tableName=None, message=None, messageFormat=None, catName=None,): self.eventId = eventId self.eventTime = eventTime self.eventType = eventType @@ -13611,6 +14400,7 @@ def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, ta self.tableName = tableName self.message = message self.messageFormat = messageFormat + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13656,6 +14446,11 @@ def read(self, iprot): self.messageFormat = iprot.readString() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13694,6 +14489,10 @@ def write(self, oprot): oprot.writeFieldBegin('messageFormat', TType.STRING, 7) oprot.writeString(self.messageFormat) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 8) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13718,6 +14517,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.message) value = (value * 31) ^ hash(self.messageFormat) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -13757,11 +14557,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype605, _size602) = iprot.readListBegin() - for _i606 in xrange(_size602): - _elem607 = NotificationEvent() - _elem607.read(iprot) - self.events.append(_elem607) + (_etype612, _size609) = iprot.readListBegin() + for _i613 in xrange(_size609): + _elem614 = NotificationEvent() + _elem614.read(iprot) + self.events.append(_elem614) iprot.readListEnd() else: iprot.skip(ftype) @@ -13778,8 +14578,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter608 in self.events: - iter608.write(oprot) + for iter615 in self.events: + iter615.write(oprot) 
oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13879,17 +14679,20 @@ class NotificationEventsCountRequest: Attributes: - fromEventId - dbName + - catName """ thrift_spec = ( None, # 0 (1, TType.I64, 'fromEventId', None, None, ), # 1 (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'catName', None, None, ), # 3 ) - def __init__(self, fromEventId=None, dbName=None,): + def __init__(self, fromEventId=None, dbName=None, catName=None,): self.fromEventId = fromEventId self.dbName = dbName + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13910,6 +14713,11 @@ def read(self, iprot): self.dbName = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13928,6 +14736,10 @@ def write(self, oprot): oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13943,6 +14755,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.fromEventId) value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -14060,20 +14873,20 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.filesAdded = [] - (_etype612, _size609) = iprot.readListBegin() - for _i613 in xrange(_size609): - _elem614 = iprot.readString() - self.filesAdded.append(_elem614) + (_etype619, _size616) = iprot.readListBegin() + for _i620 in xrange(_size616): + _elem621 = iprot.readString() + self.filesAdded.append(_elem621) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.filesAddedChecksum = [] - (_etype618, _size615) = iprot.readListBegin() - for _i619 in xrange(_size615): - _elem620 = iprot.readString() - self.filesAddedChecksum.append(_elem620) + (_etype625, _size622) = iprot.readListBegin() + for _i626 in xrange(_size622): + _elem627 = iprot.readString() + self.filesAddedChecksum.append(_elem627) iprot.readListEnd() else: iprot.skip(ftype) @@ -14094,15 +14907,15 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter621 in self.filesAdded: - oprot.writeString(iter621) + for iter628 in self.filesAdded: + oprot.writeString(iter628) oprot.writeListEnd() oprot.writeFieldEnd() if self.filesAddedChecksum is not None: oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum)) - for iter622 in self.filesAddedChecksum: - oprot.writeString(iter622) + for iter629 in self.filesAddedChecksum: + oprot.writeString(iter629) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14206,6 +15019,7 @@ class FireEventRequest: - dbName - tableName - partitionVals + - catName """ thrift_spec = ( @@ -14215,14 +15029,16 @@ class FireEventRequest: (3, TType.STRING, 'dbName', None, None, ), # 3 (4, TType.STRING, 'tableName', None, None, ), # 4 (5, TType.LIST, 'partitionVals', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'catName', None, 
None, ), # 6 ) - def __init__(self, successful=None, data=None, dbName=None, tableName=None, partitionVals=None,): + def __init__(self, successful=None, data=None, dbName=None, tableName=None, partitionVals=None, catName=None,): self.successful = successful self.data = data self.dbName = dbName self.tableName = tableName self.partitionVals = partitionVals + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14257,13 +15073,18 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = iprot.readString() - self.partitionVals.append(_elem628) + (_etype633, _size630) = iprot.readListBegin() + for _i634 in xrange(_size630): + _elem635 = iprot.readString() + self.partitionVals.append(_elem635) iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14293,10 +15114,14 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter629 in self.partitionVals: - oprot.writeString(iter629) + for iter636 in self.partitionVals: + oprot.writeString(iter636) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14315,6 +15140,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.partitionVals) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -14481,12 +15307,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype631, _vtype632, _size630 ) = iprot.readMapBegin() - for _i634 in xrange(_size630): - _key635 = iprot.readI64() - _val636 = MetadataPpdResult() - _val636.read(iprot) - self.metadata[_key635] = _val636 + (_ktype638, _vtype639, _size637 ) = iprot.readMapBegin() + for _i641 in xrange(_size637): + _key642 = iprot.readI64() + _val643 = MetadataPpdResult() + _val643.read(iprot) + self.metadata[_key642] = _val643 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14508,9 +15334,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter637,viter638 in self.metadata.items(): - oprot.writeI64(kiter637) - viter638.write(oprot) + for kiter644,viter645 in self.metadata.items(): + oprot.writeI64(kiter644) + viter645.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -14580,10 +15406,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype642, _size639) = iprot.readListBegin() - for _i643 in xrange(_size639): - _elem644 = iprot.readI64() - self.fileIds.append(_elem644) + (_etype649, _size646) = iprot.readListBegin() + for _i650 in xrange(_size646): + _elem651 = iprot.readI64() + self.fileIds.append(_elem651) iprot.readListEnd() else: iprot.skip(ftype) @@ -14615,8 +15441,8 @@ def write(self, oprot): if self.fileIds is not 
None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter645 in self.fileIds: - oprot.writeI64(iter645) + for iter652 in self.fileIds: + oprot.writeI64(iter652) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -14690,11 +15516,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype647, _vtype648, _size646 ) = iprot.readMapBegin() - for _i650 in xrange(_size646): - _key651 = iprot.readI64() - _val652 = iprot.readString() - self.metadata[_key651] = _val652 + (_ktype654, _vtype655, _size653 ) = iprot.readMapBegin() + for _i657 in xrange(_size653): + _key658 = iprot.readI64() + _val659 = iprot.readString() + self.metadata[_key658] = _val659 iprot.readMapEnd() else: iprot.skip(ftype) @@ -14716,9 +15542,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter653,viter654 in self.metadata.items(): - oprot.writeI64(kiter653) - oprot.writeString(viter654) + for kiter660,viter661 in self.metadata.items(): + oprot.writeI64(kiter660) + oprot.writeString(viter661) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -14779,10 +15605,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype658, _size655) = iprot.readListBegin() - for _i659 in xrange(_size655): - _elem660 = iprot.readI64() - self.fileIds.append(_elem660) + (_etype665, _size662) = iprot.readListBegin() + for _i666 in xrange(_size662): + _elem667 = iprot.readI64() + self.fileIds.append(_elem667) iprot.readListEnd() else: iprot.skip(ftype) @@ -14799,8 +15625,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter661 in self.fileIds: - oprot.writeI64(iter661) + for iter668 in self.fileIds: + oprot.writeI64(iter668) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14906,20 +15732,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype665, _size662) = iprot.readListBegin() - for _i666 in xrange(_size662): - _elem667 = iprot.readI64() - self.fileIds.append(_elem667) + (_etype672, _size669) = iprot.readListBegin() + for _i673 in xrange(_size669): + _elem674 = iprot.readI64() + self.fileIds.append(_elem674) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype671, _size668) = iprot.readListBegin() - for _i672 in xrange(_size668): - _elem673 = iprot.readString() - self.metadata.append(_elem673) + (_etype678, _size675) = iprot.readListBegin() + for _i679 in xrange(_size675): + _elem680 = iprot.readString() + self.metadata.append(_elem680) iprot.readListEnd() else: iprot.skip(ftype) @@ -14941,15 +15767,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter674 in self.fileIds: - oprot.writeI64(iter674) + for iter681 in self.fileIds: + oprot.writeI64(iter681) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter675 in self.metadata: - oprot.writeString(iter675) + for iter682 in self.metadata: + oprot.writeString(iter682) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not 
None: @@ -15057,10 +15883,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype679, _size676) = iprot.readListBegin() - for _i680 in xrange(_size676): - _elem681 = iprot.readI64() - self.fileIds.append(_elem681) + (_etype686, _size683) = iprot.readListBegin() + for _i687 in xrange(_size683): + _elem688 = iprot.readI64() + self.fileIds.append(_elem688) iprot.readListEnd() else: iprot.skip(ftype) @@ -15077,8 +15903,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter682 in self.fileIds: - oprot.writeI64(iter682) + for iter689 in self.fileIds: + oprot.writeI64(iter689) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15307,11 +16133,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype686, _size683) = iprot.readListBegin() - for _i687 in xrange(_size683): - _elem688 = Function() - _elem688.read(iprot) - self.functions.append(_elem688) + (_etype693, _size690) = iprot.readListBegin() + for _i694 in xrange(_size690): + _elem695 = Function() + _elem695.read(iprot) + self.functions.append(_elem695) iprot.readListEnd() else: iprot.skip(ftype) @@ -15328,8 +16154,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter689 in self.functions: - iter689.write(oprot) + for iter696 in self.functions: + iter696.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15381,10 +16207,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype693, _size690) = iprot.readListBegin() - for _i694 in xrange(_size690): - _elem695 = iprot.readI32() - self.values.append(_elem695) + (_etype700, _size697) = iprot.readListBegin() + for _i701 in xrange(_size697): + _elem702 = iprot.readI32() + self.values.append(_elem702) iprot.readListEnd() else: iprot.skip(ftype) @@ -15401,8 +16227,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.values)) - for iter696 in self.values: - oprot.writeI32(iter696) + for iter703 in self.values: + oprot.writeI32(iter703) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15436,6 +16262,7 @@ class GetTableRequest: - dbName - tblName - capabilities + - catName """ thrift_spec = ( @@ -15443,12 +16270,14 @@ class GetTableRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblName=None, capabilities=None,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15475,6 +16304,11 @@ def read(self, iprot): self.capabilities.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15497,6 
+16331,10 @@ def write(self, oprot): oprot.writeFieldBegin('capabilities', TType.STRUCT, 3) self.capabilities.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15513,6 +16351,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.capabilities) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -15600,6 +16439,7 @@ class GetTablesRequest: - dbName - tblNames - capabilities + - catName """ thrift_spec = ( @@ -15607,12 +16447,14 @@ class GetTablesRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.LIST, 'tblNames', (TType.STRING,None), None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblNames=None, capabilities=None,): + def __init__(self, dbName=None, tblNames=None, capabilities=None, catName=None,): self.dbName = dbName self.tblNames = tblNames self.capabilities = capabilities + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15631,10 +16473,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tblNames = [] - (_etype700, _size697) = iprot.readListBegin() - for _i701 in xrange(_size697): - _elem702 = iprot.readString() - self.tblNames.append(_elem702) + (_etype707, _size704) = iprot.readListBegin() + for _i708 in xrange(_size704): + _elem709 = iprot.readString() + self.tblNames.append(_elem709) iprot.readListEnd() else: iprot.skip(ftype) @@ -15644,6 +16486,11 @@ def read(self, iprot): self.capabilities.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15661,14 +16508,18 @@ def write(self, oprot): if self.tblNames is not None: oprot.writeFieldBegin('tblNames', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tblNames)) - for iter703 in self.tblNames: - oprot.writeString(iter703) + for iter710 in self.tblNames: + oprot.writeString(iter710) oprot.writeListEnd() oprot.writeFieldEnd() if self.capabilities is not None: oprot.writeFieldBegin('capabilities', TType.STRUCT, 3) self.capabilities.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15683,6 +16534,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblNames) value = (value * 31) ^ hash(self.capabilities) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -15722,11 +16574,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tables = [] - (_etype707, _size704) = iprot.readListBegin() - for _i708 in xrange(_size704): - _elem709 = Table() - _elem709.read(iprot) - self.tables.append(_elem709) + (_etype714, _size711) = iprot.readListBegin() + for _i715 in xrange(_size711): + _elem716 = Table() + _elem716.read(iprot) + self.tables.append(_elem716) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -15743,8 +16595,8 @@ def write(self, oprot): if self.tables is not None: oprot.writeFieldBegin('tables', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tables)) - for iter710 in self.tables: - iter710.write(oprot) + for iter717 in self.tables: + iter717.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15907,6 +16759,7 @@ class TableMeta: - tableName - tableType - comments + - catName """ thrift_spec = ( @@ -15915,13 +16768,15 @@ class TableMeta: (2, TType.STRING, 'tableName', None, None, ), # 2 (3, TType.STRING, 'tableType', None, None, ), # 3 (4, TType.STRING, 'comments', None, None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, dbName=None, tableName=None, tableType=None, comments=None,): + def __init__(self, dbName=None, tableName=None, tableType=None, comments=None, catName=None,): self.dbName = dbName self.tableName = tableName self.tableType = tableType self.comments = comments + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15952,6 +16807,11 @@ def read(self, iprot): self.comments = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15978,6 +16838,10 @@ def write(self, oprot): oprot.writeFieldBegin('comments', TType.STRING, 4) oprot.writeString(self.comments) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15997,6 +16861,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.comments) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -16042,10 +16907,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.tablesUsed = set() - (_etype714, _size711) = iprot.readSetBegin() - for _i715 in xrange(_size711): - _elem716 = iprot.readString() - self.tablesUsed.add(_elem716) + (_etype721, _size718) = iprot.readSetBegin() + for _i722 in xrange(_size718): + _elem723 = iprot.readString() + self.tablesUsed.add(_elem723) iprot.readSetEnd() else: iprot.skip(ftype) @@ -16072,8 +16937,8 @@ def write(self, oprot): if self.tablesUsed is not None: oprot.writeFieldBegin('tablesUsed', TType.SET, 1) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) - for iter717 in self.tablesUsed: - oprot.writeString(iter717) + for iter724 in self.tablesUsed: + oprot.writeString(iter724) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: @@ -16977,44 +17842,44 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.pools = [] - (_etype721, _size718) = iprot.readListBegin() - for _i722 in xrange(_size718): - _elem723 = WMPool() - _elem723.read(iprot) - self.pools.append(_elem723) + (_etype728, _size725) = iprot.readListBegin() + for _i729 in xrange(_size725): + _elem730 = WMPool() + _elem730.read(iprot) + self.pools.append(_elem730) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.mappings = [] - (_etype727, _size724) = iprot.readListBegin() - for _i728 in xrange(_size724): - _elem729 = WMMapping() - 
_elem729.read(iprot) - self.mappings.append(_elem729) + (_etype734, _size731) = iprot.readListBegin() + for _i735 in xrange(_size731): + _elem736 = WMMapping() + _elem736.read(iprot) + self.mappings.append(_elem736) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.triggers = [] - (_etype733, _size730) = iprot.readListBegin() - for _i734 in xrange(_size730): - _elem735 = WMTrigger() - _elem735.read(iprot) - self.triggers.append(_elem735) + (_etype740, _size737) = iprot.readListBegin() + for _i741 in xrange(_size737): + _elem742 = WMTrigger() + _elem742.read(iprot) + self.triggers.append(_elem742) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.poolTriggers = [] - (_etype739, _size736) = iprot.readListBegin() - for _i740 in xrange(_size736): - _elem741 = WMPoolTrigger() - _elem741.read(iprot) - self.poolTriggers.append(_elem741) + (_etype746, _size743) = iprot.readListBegin() + for _i747 in xrange(_size743): + _elem748 = WMPoolTrigger() + _elem748.read(iprot) + self.poolTriggers.append(_elem748) iprot.readListEnd() else: iprot.skip(ftype) @@ -17035,29 +17900,29 @@ def write(self, oprot): if self.pools is not None: oprot.writeFieldBegin('pools', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.pools)) - for iter742 in self.pools: - iter742.write(oprot) + for iter749 in self.pools: + iter749.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.mappings is not None: oprot.writeFieldBegin('mappings', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.mappings)) - for iter743 in self.mappings: - iter743.write(oprot) + for iter750 in self.mappings: + iter750.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter744 in self.triggers: - iter744.write(oprot) + for iter751 in self.triggers: + iter751.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.poolTriggers is not None: oprot.writeFieldBegin('poolTriggers', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers)) - for iter745 in self.poolTriggers: - iter745.write(oprot) + for iter752 in self.poolTriggers: + iter752.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17531,11 +18396,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.resourcePlans = [] - (_etype749, _size746) = iprot.readListBegin() - for _i750 in xrange(_size746): - _elem751 = WMResourcePlan() - _elem751.read(iprot) - self.resourcePlans.append(_elem751) + (_etype756, _size753) = iprot.readListBegin() + for _i757 in xrange(_size753): + _elem758 = WMResourcePlan() + _elem758.read(iprot) + self.resourcePlans.append(_elem758) iprot.readListEnd() else: iprot.skip(ftype) @@ -17552,8 +18417,8 @@ def write(self, oprot): if self.resourcePlans is not None: oprot.writeFieldBegin('resourcePlans', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans)) - for iter752 in self.resourcePlans: - iter752.write(oprot) + for iter759 in self.resourcePlans: + iter759.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17857,20 +18722,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.errors = [] - (_etype756, _size753) = iprot.readListBegin() - for _i757 in xrange(_size753): - _elem758 = iprot.readString() - self.errors.append(_elem758) + (_etype763, _size760) = iprot.readListBegin() + for _i764 in 
xrange(_size760): + _elem765 = iprot.readString() + self.errors.append(_elem765) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.warnings = [] - (_etype762, _size759) = iprot.readListBegin() - for _i763 in xrange(_size759): - _elem764 = iprot.readString() - self.warnings.append(_elem764) + (_etype769, _size766) = iprot.readListBegin() + for _i770 in xrange(_size766): + _elem771 = iprot.readString() + self.warnings.append(_elem771) iprot.readListEnd() else: iprot.skip(ftype) @@ -17887,15 +18752,15 @@ def write(self, oprot): if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.errors)) - for iter765 in self.errors: - oprot.writeString(iter765) + for iter772 in self.errors: + oprot.writeString(iter772) oprot.writeListEnd() oprot.writeFieldEnd() if self.warnings is not None: oprot.writeFieldBegin('warnings', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.warnings)) - for iter766 in self.warnings: - oprot.writeString(iter766) + for iter773 in self.warnings: + oprot.writeString(iter773) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18472,11 +19337,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.triggers = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = WMTrigger() - _elem772.read(iprot) - self.triggers.append(_elem772) + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = WMTrigger() + _elem779.read(iprot) + self.triggers.append(_elem779) iprot.readListEnd() else: iprot.skip(ftype) @@ -18493,8 +19358,8 @@ def write(self, oprot): if self.triggers is not None: oprot.writeFieldBegin('triggers', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.triggers)) - for iter773 in self.triggers: - iter773.write(oprot) + for iter780 in self.triggers: + iter780.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19273,6 +20138,7 @@ class ISchema: Attributes: - schemaType - name + - catName - dbName - compatibility - validationLevel @@ -19285,17 +20151,19 @@ class ISchema: None, # 0 (1, TType.I32, 'schemaType', None, None, ), # 1 (2, TType.STRING, 'name', None, None, ), # 2 - (3, TType.STRING, 'dbName', None, None, ), # 3 - (4, TType.I32, 'compatibility', None, None, ), # 4 - (5, TType.I32, 'validationLevel', None, None, ), # 5 - (6, TType.BOOL, 'canEvolve', None, None, ), # 6 - (7, TType.STRING, 'schemaGroup', None, None, ), # 7 - (8, TType.STRING, 'description', None, None, ), # 8 + (3, TType.STRING, 'catName', None, None, ), # 3 + (4, TType.STRING, 'dbName', None, None, ), # 4 + (5, TType.I32, 'compatibility', None, None, ), # 5 + (6, TType.I32, 'validationLevel', None, None, ), # 6 + (7, TType.BOOL, 'canEvolve', None, None, ), # 7 + (8, TType.STRING, 'schemaGroup', None, None, ), # 8 + (9, TType.STRING, 'description', None, None, ), # 9 ) - def __init__(self, schemaType=None, name=None, dbName=None, compatibility=None, validationLevel=None, canEvolve=None, schemaGroup=None, description=None,): + def __init__(self, schemaType=None, name=None, catName=None, dbName=None, compatibility=None, validationLevel=None, canEvolve=None, schemaGroup=None, description=None,): self.schemaType = schemaType self.name = name + self.catName = catName self.dbName = dbName self.compatibility = compatibility self.validationLevel = validationLevel @@ -19324,30 +20192,35 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 3: if ftype == 
TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 4: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: if ftype == TType.I32: self.compatibility = iprot.readI32() else: iprot.skip(ftype) - elif fid == 5: + elif fid == 6: if ftype == TType.I32: self.validationLevel = iprot.readI32() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.canEvolve = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.STRING: self.schemaGroup = iprot.readString() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.STRING: self.description = iprot.readString() else: @@ -19370,28 +20243,32 @@ def write(self, oprot): oprot.writeFieldBegin('name', TType.STRING, 2) oprot.writeString(self.name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 3) + oprot.writeFieldBegin('dbName', TType.STRING, 4) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.compatibility is not None: - oprot.writeFieldBegin('compatibility', TType.I32, 4) + oprot.writeFieldBegin('compatibility', TType.I32, 5) oprot.writeI32(self.compatibility) oprot.writeFieldEnd() if self.validationLevel is not None: - oprot.writeFieldBegin('validationLevel', TType.I32, 5) + oprot.writeFieldBegin('validationLevel', TType.I32, 6) oprot.writeI32(self.validationLevel) oprot.writeFieldEnd() if self.canEvolve is not None: - oprot.writeFieldBegin('canEvolve', TType.BOOL, 6) + oprot.writeFieldBegin('canEvolve', TType.BOOL, 7) oprot.writeBool(self.canEvolve) oprot.writeFieldEnd() if self.schemaGroup is not None: - oprot.writeFieldBegin('schemaGroup', TType.STRING, 7) + oprot.writeFieldBegin('schemaGroup', TType.STRING, 8) oprot.writeString(self.schemaGroup) oprot.writeFieldEnd() if self.description is not None: - oprot.writeFieldBegin('description', TType.STRING, 8) + oprot.writeFieldBegin('description', TType.STRING, 9) oprot.writeString(self.description) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19405,6 +20282,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.schemaType) value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.compatibility) value = (value * 31) ^ hash(self.validationLevel) @@ -19427,17 +20305,20 @@ def __ne__(self, other): class ISchemaName: """ Attributes: + - catName - dbName - schemaName """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbName', None, None, ), # 1 - (2, TType.STRING, 'schemaName', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'schemaName', None, None, ), # 3 ) - def __init__(self, dbName=None, schemaName=None,): + def __init__(self, catName=None, dbName=None, schemaName=None,): + self.catName = catName self.dbName = dbName self.schemaName = schemaName @@ -19452,11 +20333,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: 
self.schemaName = iprot.readString() else: iprot.skip(ftype) @@ -19470,12 +20356,16 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ISchemaName') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.schemaName is not None: - oprot.writeFieldBegin('schemaName', TType.STRING, 2) + oprot.writeFieldBegin('schemaName', TType.STRING, 3) oprot.writeString(self.schemaName) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19487,6 +20377,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.schemaName) return value @@ -19652,11 +20543,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.cols = [] - (_etype777, _size774) = iprot.readListBegin() - for _i778 in xrange(_size774): - _elem779 = FieldSchema() - _elem779.read(iprot) - self.cols.append(_elem779) + (_etype784, _size781) = iprot.readListBegin() + for _i785 in xrange(_size781): + _elem786 = FieldSchema() + _elem786.read(iprot) + self.cols.append(_elem786) iprot.readListEnd() else: iprot.skip(ftype) @@ -19716,8 +20607,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter780 in self.cols: - iter780.write(oprot) + for iter787 in self.cols: + iter787.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.state is not None: @@ -19972,11 +20863,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.schemaVersions = [] - (_etype784, _size781) = iprot.readListBegin() - for _i785 in xrange(_size781): - _elem786 = SchemaVersionDescriptor() - _elem786.read(iprot) - self.schemaVersions.append(_elem786) + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = SchemaVersionDescriptor() + _elem793.read(iprot) + self.schemaVersions.append(_elem793) iprot.readListEnd() else: iprot.skip(ftype) @@ -19993,8 +20884,8 @@ def write(self, oprot): if self.schemaVersions is not None: oprot.writeFieldBegin('schemaVersions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions)) - for iter787 in self.schemaVersions: - iter787.write(oprot) + for iter794 in self.schemaVersions: + iter794.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 16c814e472..f2cdcf949d 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -231,6 +231,7 @@ class SQLPrimaryKey ENABLE_CSTR = 6 VALIDATE_CSTR = 7 RELY_CSTR = 8 + CATNAME = 9 FIELDS = { TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, @@ -240,7 +241,8 @@ class SQLPrimaryKey PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'}, ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'}, VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'}, - RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'} 
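The regenerated bindings around this point show two distinct ways catName enters a struct. Structs that predate catalogs (Table, Partition, GetTableRequest, GetTablesRequest, TableMeta, and the partition/stats request structs) append it as a new optional field with the next free id, which keeps old clients wire-compatible. Structs new in this release (ISchema, ISchemaName, CreationMetadata, the SQL*Constraint structs and the *ConstraintsRequest structs) instead place catName at field 1 (field 3 for ISchema, after schemaType and name) and renumber the fields behind it, treating it as required. A minimal Java sketch of the renumbered pattern, assuming the regenerated Java bindings mirror the Thrift spec shown here (the catalog, database, and schema names are illustrative):

    // ISchemaName now carries catName first; dbName and schemaName shift to fields 2 and 3.
    ISchemaName schemaName = new ISchemaName();
    schemaName.setCatName("hive");
    schemaName.setDbName("default");
    schemaName.setSchemaName("web_events");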
+ RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -267,6 +269,7 @@ class SQLForeignKey ENABLE_CSTR = 12 VALIDATE_CSTR = 13 RELY_CSTR = 14 + CATNAME = 15 FIELDS = { PKTABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'pktable_db'}, @@ -282,7 +285,8 @@ class SQLForeignKey PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'}, ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'}, VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'}, - RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'} + RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -295,16 +299,18 @@ end class SQLUniqueConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - KEY_SEQ = 4 - UK_NAME = 5 - ENABLE_CSTR = 6 - VALIDATE_CSTR = 7 - RELY_CSTR = 8 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + KEY_SEQ = 5 + UK_NAME = 6 + ENABLE_CSTR = 7 + VALIDATE_CSTR = 8 + RELY_CSTR = 9 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -325,15 +331,17 @@ end class SQLNotNullConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - NN_NAME = 4 - ENABLE_CSTR = 5 - VALIDATE_CSTR = 6 - RELY_CSTR = 7 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + NN_NAME = 5 + ENABLE_CSTR = 6 + VALIDATE_CSTR = 7 + RELY_CSTR = 8 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -353,16 +361,18 @@ end class SQLDefaultConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - DEFAULT_VALUE = 4 - DC_NAME = 5 - ENABLE_CSTR = 6 - VALIDATE_CSTR = 7 - RELY_CSTR = 8 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + DEFAULT_VALUE = 5 + DC_NAME = 6 + ENABLE_CSTR = 7 + VALIDATE_CSTR = 8 + RELY_CSTR = 9 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -383,16 +393,18 @@ end class SQLCheckConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - CHECK_EXPRESSION = 4 - DC_NAME = 5 - ENABLE_CSTR = 6 - VALIDATE_CSTR = 7 - RELY_CSTR = 8 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + CHECK_EXPRESSION = 5 + DC_NAME = 6 + ENABLE_CSTR = 7 + VALIDATE_CSTR = 8 + RELY_CSTR = 9 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type 
=> ::Thrift::Types::STRING, :name => 'column_name'}, @@ -440,13 +452,15 @@ class HiveObjectRef OBJECTNAME = 3 PARTVALUES = 4 COLUMNNAME = 5 + CATNAME = 6 FIELDS = { OBJECTTYPE => {:type => ::Thrift::Types::I32, :name => 'objectType', :enum_class => ::HiveObjectType}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, OBJECTNAME => {:type => ::Thrift::Types::STRING, :name => 'objectName'}, PARTVALUES => {:type => ::Thrift::Types::LIST, :name => 'partValues', :element => {:type => ::Thrift::Types::STRING}}, - COLUMNNAME => {:type => ::Thrift::Types::STRING, :name => 'columnName'} + COLUMNNAME => {:type => ::Thrift::Types::STRING, :name => 'columnName'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -768,6 +782,106 @@ class GrantRevokeRoleResponse ::Thrift::Struct.generate_accessors self end +class Catalog + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + DESCRIPTION = 2 + LOCATIONURI = 3 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true}, + LOCATIONURI => {:type => ::Thrift::Types::STRING, :name => 'locationUri'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class CreateCatalogRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + CATALOG = 1 + + FIELDS = { + CATALOG => {:type => ::Thrift::Types::STRUCT, :name => 'catalog', :class => ::Catalog} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetCatalogRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetCatalogResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + CATALOG = 1 + + FIELDS = { + CATALOG => {:type => ::Thrift::Types::STRUCT, :name => 'catalog', :class => ::Catalog} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetCatalogsResponse + include ::Thrift::Struct, ::Thrift::Struct_Union + NAMES = 1 + + FIELDS = { + NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class DropCatalogRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class Database include ::Thrift::Struct, ::Thrift::Struct_Union NAME = 1 @@ -777,6 +891,7 @@ class Database PRIVILEGES = 5 OWNERNAME = 6 OWNERTYPE = 7 + CATALOGNAME = 8 FIELDS = { NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, @@ -785,7 +900,8 @@ class Database PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, OWNERNAME => {:type => ::Thrift::Types::STRING, :name => 'ownerName', :optional => true}, - OWNERTYPE => {:type => 
::Thrift::Types::I32, :name => 'ownerType', :optional => true, :enum_class => ::PrincipalType} + OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :optional => true, :enum_class => ::PrincipalType}, + CATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'catalogName', :optional => true} } def struct_fields; FIELDS; end @@ -924,6 +1040,7 @@ class Table TEMPORARY = 14 REWRITEENABLED = 15 CREATIONMETADATA = 16 + CATNAME = 17 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -941,7 +1058,8 @@ class Table PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}, REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true}, - CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true} + CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -962,6 +1080,7 @@ class Partition SD = 6 PARAMETERS = 7 PRIVILEGES = 8 + CATNAME = 9 FIELDS = { VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}}, @@ -971,7 +1090,8 @@ class Partition LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'}, SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, - PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true} + PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1049,13 +1169,15 @@ class PartitionSpec ROOTPATH = 3 SHAREDSDPARTITIONSPEC = 4 PARTITIONLIST = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'}, SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true}, - PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true} + PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1374,13 +1496,15 @@ class ColumnStatisticsDesc TABLENAME = 3 PARTNAME = 4 LASTANALYZED = 5 + CATNAME = 6 FIELDS = { ISTBLLEVEL => {:type => ::Thrift::Types::BOOL, :name => 'isTblLevel'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partName', :optional => true}, - LASTANALYZED => 
{:type => ::Thrift::Types::I64, :name => 'lastAnalyzed', :optional => true} + LASTANALYZED => {:type => ::Thrift::Types::I64, :name => 'lastAnalyzed', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1491,10 +1615,12 @@ class PrimaryKeysRequest include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TBL_NAME = 2 + CATNAME = 3 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1530,12 +1656,14 @@ class ForeignKeysRequest PARENT_TBL_NAME = 2 FOREIGN_DB_NAME = 3 FOREIGN_TBL_NAME = 4 + CATNAME = 5 FIELDS = { PARENT_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_db_name'}, PARENT_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_tbl_name'}, FOREIGN_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_db_name'}, - FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'} + FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1565,10 +1693,12 @@ end class UniqueConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1576,6 +1706,7 @@ class UniqueConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1602,10 +1733,12 @@ end class NotNullConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1613,6 +1746,7 @@ class NotNullConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1639,10 +1773,12 @@ end class DefaultConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME 
=> {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1650,6 +1786,7 @@ class DefaultConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1676,10 +1813,12 @@ end class CheckConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1687,6 +1826,7 @@ class CheckConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1716,11 +1856,13 @@ class DropConstraintRequest DBNAME = 1 TABLENAME = 2 CONSTRAINTNAME = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'}, - CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'} + CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1863,13 +2005,15 @@ class PartitionsByExprRequest EXPR = 3 DEFAULTPARTITIONNAME = 4 MAXPARTS = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true}, DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true}, - MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true} + MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1922,11 +2066,13 @@ class TableStatsRequest DBNAME = 1 TBLNAME = 2 COLNAMES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, - COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}} + COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1946,12 +2092,14 @@ class PartitionsStatsRequest TBLNAME = 2 COLNAMES = 3 PARTNAMES = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => 
::Thrift::Types::STRING, :name => 'tblName'}, COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, - PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}} + PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1989,13 +2137,15 @@ class AddPartitionsRequest PARTS = 3 IFNOTEXISTS = 4 NEEDRESULT = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'}, - NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true} + NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2084,6 +2234,7 @@ class DropPartitionsRequest IGNOREPROTECTION = 6 ENVIRONMENTCONTEXT = 7 NEEDRESULT = 8 + CATNAME = 9 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2093,7 +2244,8 @@ class DropPartitionsRequest IFEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifExists', :default => true, :optional => true}, IGNOREPROTECTION => {:type => ::Thrift::Types::BOOL, :name => 'ignoreProtection', :optional => true}, ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true}, - NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true} + NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2117,6 +2269,7 @@ class PartitionValuesRequest PARTITIONORDER = 6 ASCENDING = 7 MAXPARTS = 8 + CATNAME = 9 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2126,7 +2279,8 @@ class PartitionValuesRequest FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter', :optional => true}, PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true}, ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', :default => true, :optional => true}, - MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true} + MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2205,6 +2359,7 @@ class Function CREATETIME = 6 FUNCTIONTYPE = 7 RESOURCEURIS = 8 + CATNAME = 9 FIELDS = { FUNCTIONNAME => {:type => ::Thrift::Types::STRING, :name => 'functionName'}, @@ -2214,7 +2369,8 @@ class Function OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :enum_class => ::PrincipalType}, CREATETIME => {:type => ::Thrift::Types::I32, :name => 
'createTime'}, FUNCTIONTYPE => {:type => ::Thrift::Types::I32, :name => 'functionType', :enum_class => ::FunctionType}, - RESOURCEURIS => {:type => ::Thrift::Types::LIST, :name => 'resourceUris', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ResourceUri}} + RESOURCEURIS => {:type => ::Thrift::Types::LIST, :name => 'resourceUris', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ResourceUri}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3015,12 +3171,14 @@ end class CreationMetadata include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - TBLNAME = 2 - TABLESUSED = 3 - VALIDTXNLIST = 4 + CATNAME = 1 + DBNAME = 2 + TBLNAME = 3 + TABLESUSED = 4 + VALIDTXNLIST = 5 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}}, @@ -3030,6 +3188,7 @@ class CreationMetadata def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablesUsed is unset!') unless @tablesUsed @@ -3066,6 +3225,7 @@ class NotificationEvent TABLENAME = 5 MESSAGE = 6 MESSAGEFORMAT = 7 + CATNAME = 8 FIELDS = { EVENTID => {:type => ::Thrift::Types::I64, :name => 'eventId'}, @@ -3074,7 +3234,8 @@ class NotificationEvent DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true}, MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}, - MESSAGEFORMAT => {:type => ::Thrift::Types::STRING, :name => 'messageFormat', :optional => true} + MESSAGEFORMAT => {:type => ::Thrift::Types::STRING, :name => 'messageFormat', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3127,10 +3288,12 @@ class NotificationEventsCountRequest include ::Thrift::Struct, ::Thrift::Struct_Union FROMEVENTID = 1 DBNAME = 2 + CATNAME = 3 FIELDS = { FROMEVENTID => {:type => ::Thrift::Types::I64, :name => 'fromEventId'}, - DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'} + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3211,13 +3374,15 @@ class FireEventRequest DBNAME = 3 TABLENAME = 4 PARTITIONVALS = 5 + CATNAME = 6 FIELDS = { SUCCESSFUL => {:type => ::Thrift::Types::BOOL, :name => 'successful'}, DATA => {:type => ::Thrift::Types::STRUCT, :name => 'data', :class => ::FireEventRequestData}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true}, - PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => 
::Thrift::Types::STRING}, :optional => true} + PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3498,11 +3663,13 @@ class GetTableRequest DBNAME = 1 TBLNAME = 2 CAPABILITIES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, - CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true} + CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3537,11 +3704,13 @@ class GetTablesRequest DBNAME = 1 TBLNAMES = 2 CAPABILITIES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAMES => {:type => ::Thrift::Types::LIST, :name => 'tblNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true} + CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3611,12 +3780,14 @@ class TableMeta TABLENAME = 2 TABLETYPE = 3 COMMENTS = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, - COMMENTS => {:type => ::Thrift::Types::STRING, :name => 'comments', :optional => true} + COMMENTS => {:type => ::Thrift::Types::STRING, :name => 'comments', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -4422,16 +4593,18 @@ class ISchema include ::Thrift::Struct, ::Thrift::Struct_Union SCHEMATYPE = 1 NAME = 2 - DBNAME = 3 - COMPATIBILITY = 4 - VALIDATIONLEVEL = 5 - CANEVOLVE = 6 - SCHEMAGROUP = 7 - DESCRIPTION = 8 + CATNAME = 3 + DBNAME = 4 + COMPATIBILITY = 5 + VALIDATIONLEVEL = 6 + CANEVOLVE = 7 + SCHEMAGROUP = 8 + DESCRIPTION = 9 FIELDS = { SCHEMATYPE => {:type => ::Thrift::Types::I32, :name => 'schemaType', :enum_class => ::SchemaType}, NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, COMPATIBILITY => {:type => ::Thrift::Types::I32, :name => 'compatibility', :enum_class => ::SchemaCompatibility}, VALIDATIONLEVEL => {:type => ::Thrift::Types::I32, :name => 'validationLevel', :enum_class => ::SchemaValidation}, @@ -4459,10 +4632,12 @@ end class ISchemaName include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - SCHEMANAME = 2 + CATNAME = 1 + DBNAME = 2 + SCHEMANAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'} } diff --git 
standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 7a07b73cc7..c1036755b4 100644 --- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -43,6 +43,73 @@ module ThriftHiveMetastore return end + def create_catalog(catalog) + send_create_catalog(catalog) + recv_create_catalog() + end + + def send_create_catalog(catalog) + send_message('create_catalog', Create_catalog_args, :catalog => catalog) + end + + def recv_create_catalog() + result = receive_message(Create_catalog_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + + def get_catalog(catName) + send_get_catalog(catName) + return recv_get_catalog() + end + + def send_get_catalog(catName) + send_message('get_catalog', Get_catalog_args, :catName => catName) + end + + def recv_get_catalog() + result = receive_message(Get_catalog_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalog failed: unknown result') + end + + def get_catalogs() + send_get_catalogs() + return recv_get_catalogs() + end + + def send_get_catalogs() + send_message('get_catalogs', Get_catalogs_args) + end + + def recv_get_catalogs() + result = receive_message(Get_catalogs_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalogs failed: unknown result') + end + + def drop_catalog(catName) + send_drop_catalog(catName) + recv_drop_catalog() + end + + def send_drop_catalog(catName) + send_message('drop_catalog', Drop_catalog_args, :catName => catName) + end + + def recv_drop_catalog() + result = receive_message(Drop_catalog_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? 
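The four client methods above give catalogs the usual CRUD surface; note that in the generated Ruby, get_catalog and drop_catalog take whole request structs (GetCatalogRequest, DropCatalogRequest) even though the generated parameter is named catName. A hedged Java sketch of the same lifecycle, assuming the Java IMetaStoreClient exposes matching wrappers (those wrappers sit outside this hunk, and "client" is assumed to be an already-connected instance):

    // Create a catalog with its own warehouse root, read it back, list all, then drop it.
    Catalog cat = new Catalog();
    cat.setName("experimental");                                  // required
    cat.setDescription("sandbox catalog");                        // optional, per the Ruby FIELDS above
    cat.setLocationUri("hdfs://nn:8020/warehouse/experimental");
    client.createCatalog(cat);                                    // may raise AlreadyExistsException
    Catalog fetched = client.getCatalog("experimental");
    List<String> allCatalogs = client.getCatalogs();
    client.dropCatalog("experimental");                           // may raise InvalidOperationException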
+ return + end + def create_database(database) send_create_database(database) recv_create_database() @@ -660,13 +727,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialization_invalidation_info failed: unknown result') end - def update_creation_metadata(dbname, tbl_name, creation_metadata) - send_update_creation_metadata(dbname, tbl_name, creation_metadata) + def update_creation_metadata(catName, dbname, tbl_name, creation_metadata) + send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) recv_update_creation_metadata() end - def send_update_creation_metadata(dbname, tbl_name, creation_metadata) - send_message('update_creation_metadata', Update_creation_metadata_args, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata) + def send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) + send_message('update_creation_metadata', Update_creation_metadata_args, :catName => catName, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata) end def recv_update_creation_metadata() @@ -3308,6 +3375,60 @@ module ThriftHiveMetastore write_result(result, oprot, 'setMetaConf', seqid) end + def process_create_catalog(seqid, iprot, oprot) + args = read_args(iprot, Create_catalog_args) + result = Create_catalog_result.new() + begin + @handler.create_catalog(args.catalog) + rescue ::AlreadyExistsException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'create_catalog', seqid) + end + + def process_get_catalog(seqid, iprot, oprot) + args = read_args(iprot, Get_catalog_args) + result = Get_catalog_result.new() + begin + result.success = @handler.get_catalog(args.catName) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_catalog', seqid) + end + + def process_get_catalogs(seqid, iprot, oprot) + args = read_args(iprot, Get_catalogs_args) + result = Get_catalogs_result.new() + begin + result.success = @handler.get_catalogs() + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'get_catalogs', seqid) + end + + def process_drop_catalog(seqid, iprot, oprot) + args = read_args(iprot, Drop_catalog_args) + result = Drop_catalog_result.new() + begin + @handler.drop_catalog(args.catName) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_catalog', seqid) + end + def process_create_database(seqid, iprot, oprot) args = read_args(iprot, Create_database_args) result = Create_database_result.new() @@ -3799,7 +3920,7 @@ module ThriftHiveMetastore args = read_args(iprot, Update_creation_metadata_args) result = Update_creation_metadata_result.new() begin - @handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + @handler.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata) rescue ::MetaException => o1 result.o1 = o1 rescue ::InvalidOperationException => o2 @@ -5826,6 +5947,147 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Create_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATALOG = 1 + + FIELDS = { + CATALOG => {:type => 
::Thrift::Types::STRUCT, :name => 'catalog', :class => ::CreateCatalogRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATNAME = 1 + + FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRUCT, :name => 'catName', :class => ::GetCatalogRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetCatalogResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalogs_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalogs_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetCatalogsResponse}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATNAME = 1 + + FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRUCT, :name => 'catName', :class => ::DropCatalogRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Create_database_args include ::Thrift::Struct, ::Thrift::Struct_Union DATABASE = 1 @@ -7197,11 +7459,13 @@ module ThriftHiveMetastore class Update_creation_metadata_args include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - TBL_NAME = 2 - CREATION_METADATA = 3 + CATNAME = 1 + DBNAME = 2 + TBL_NAME = 3 + CREATION_METADATA = 4 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 
'tbl_name'}, CREATION_METADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creation_metadata', :class => ::CreationMetadata} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java index bdac1618d6..8e920bb992 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java @@ -152,16 +152,16 @@ public Float getHitRatio() { * Return aggregate stats for a column from the cache or null. * While reading from the nodelist for a key, we wait maxReaderWaitTime to acquire the lock, * failing which we return a cache miss (i.e. null) - * - * @param dbName - * @param tblName - * @param colName - * @param partNames - * @return + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colName column name + * @param partNames list of partition names + * @return aggregated col stats */ - public AggrColStats get(String dbName, String tblName, String colName, List partNames) { + public AggrColStats get(String catName, String dbName, String tblName, String colName, List partNames) { // Cache key - Key key = new Key(dbName, tblName, colName); + Key key = new Key(catName, dbName, tblName, colName); AggrColStatsList candidateList = cacheStore.get(key); // No key, or no nodes in candidate list if ((candidateList == null) || (candidateList.nodes.size() == 0)) { @@ -267,23 +267,23 @@ private AggrColStats findBestMatch(List partNames, List ca * Add a new node to the cache; may trigger the cleaner thread if the cache is near full capacity. * We'll however add the node even if we temporarily exceed maxCacheNodes, because the cleaner * will eventually create space from expired nodes or by removing LRU nodes.
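Because catName is now part of Key, stats cached for identically named databases, tables, and columns in two different catalogs no longer collide. A short usage sketch against the get/add signatures above ("cache", "partNames", "numPartsCached", "colStats", and "bloomFilter" are assumed to already be in scope):

    // Catalog-scoped lookup; on a miss, publish the freshly aggregated stats.
    AggrColStats cached = cache.get("hive", "retail", "sales", "amount", partNames);
    if (cached == null) {
      cache.add("hive", "retail", "sales", "amount", numPartsCached, colStats, bloomFilter);
    }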
- * - * @param dbName - * @param tblName - * @param colName + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colName column name * @param numPartsCached * @param colStats * @param bloomFilter */ // TODO: make add asynchronous: add shouldn't block the higher level calls - public void add(String dbName, String tblName, String colName, long numPartsCached, + public void add(String catName, String dbName, String tblName, String colName, long numPartsCached, ColumnStatisticsObj colStats, BloomFilter bloomFilter) { // If we have no space in the cache, run cleaner thread if (getCurrentNodes() / maxCacheNodes > maxFull) { spawnCleaner(); } // Cache key - Key key = new Key(dbName, tblName, colName); + Key key = new Key(catName, dbName, tblName, colName); // Add new node to the cache AggrColStats node = new AggrColStats(numPartsCached, bloomFilter, colStats); AggrColStatsList nodeList; @@ -463,15 +463,17 @@ private boolean isExpired(AggrColStats aggrColStats) { * Key object for the stats cache hashtable */ static class Key { + private final String catName; private final String dbName; private final String tblName; private final String colName; - Key(String db, String table, String col) { + Key(String cat, String db, String table, String col) { // Don't construct an illegal cache key - if ((db == null) || (table == null) || (col == null)) { - throw new IllegalArgumentException("dbName, tblName, colName can't be null"); + if (cat == null || (db == null) || (table == null) || (col == null)) { + throw new IllegalArgumentException("catName, dbName, tblName, colName can't be null"); } + catName = cat; dbName = db; tblName = table; colName = col; @@ -483,18 +485,20 @@ public boolean equals(Object other) { return false; } Key that = (Key) other; - return dbName.equals(that.dbName) && tblName.equals(that.tblName) - && colName.equals(that.colName); + return catName.equals(that.catName) && dbName.equals(that.dbName) && + tblName.equals(that.tblName) && colName.equals(that.colName); } @Override public int hashCode() { - return dbName.hashCode() * 31 + tblName.hashCode() * 31 + colName.hashCode(); + return catName.hashCode() * 31 + dbName.hashCode() * 31 + tblName.hashCode() * 31 + + colName.hashCode(); } @Override public String toString() { - return "database:" + dbName + ", table:" + tblName + ", column:" + colName; + return "catalog: " + catName + ", database:" + dbName + ", table:" + tblName + ", column:" + + colName; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java index fc0b4d7d08..050dca9abf 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -35,7 +35,7 @@ /** * @deprecated As of release 2.2.0. 
Replaced by {@link #alterTable(RawStore, Warehouse, String, - * String, Table, EnvironmentContext, IHMSHandler)} + * String, String, Table, EnvironmentContext, IHMSHandler)} * * handles alter table, the changes could be cascaded to partitions if applicable * @@ -43,6 +43,8 @@ * object to get metadata * @param wh * Hive Warehouse where table data is stored + * @param catName + * catalog of the table being altered * @param dbname * database of the table being altered * @param name @@ -56,9 +58,11 @@ * thrown if there is any other error */ @Deprecated - void alterTable(RawStore msdb, Warehouse wh, String dbname, + default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newTable, EnvironmentContext envContext) - throws InvalidOperationException, MetaException; + throws InvalidOperationException, MetaException { + alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null); + } /** * handles alter table, the changes could be cascaded to partitions if applicable @@ -67,6 +71,7 @@ void alterTable(RawStore msdb, Warehouse wh, String dbname, * object to get metadata * @param wh * Hive Warehouse where table data is stored + * @param catName catalog of the table being altered * @param dbname * database of the table being altered * @param name @@ -81,7 +86,7 @@ void alterTable(RawStore msdb, Warehouse wh, String dbname, * @throws MetaException * thrown if there is any other error */ - void alterTable(RawStore msdb, Warehouse wh, String dbname, + void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newTable, EnvironmentContext envContext, IHMSHandler handler) throws InvalidOperationException, MetaException; @@ -119,7 +124,8 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * * @param msdb * object to get metadata - * @param wh + * @param wh physical warehouse class + * @param catName catalog name * @param dbname * database of the partition being altered * @param name @@ -136,14 +142,15 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * @throws AlreadyExistsException * @throws MetaException */ - Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part, EnvironmentContext environmentContext, - IHMSHandler handler) + Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName, + final String dbname, final String name, final List part_vals, + final Partition new_part, EnvironmentContext environmentContext, + IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; /** - * @deprecated As of release 2.2.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String, - * String, List, EnvironmentContext, IHMSHandler)} + * @deprecated As of release 3.0.0. 
Replaced by {@link #alterPartitions(RawStore, Warehouse, String, + * String, String, List, EnvironmentContext, IHMSHandler)} * * handles alter partitions * @@ -188,7 +195,7 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * @throws AlreadyExistsException * @throws MetaException */ - List alterPartitions(final RawStore msdb, Warehouse wh, + List alterPartitions(final RawStore msdb, Warehouse wh, final String catName, final String dbname, final String name, final List new_parts, EnvironmentContext environmentContext,IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java index ca63333f5d..4e1dabab11 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; /** * Default no-op implementation of the MetaStoreFilterHook that returns the result as is @@ -47,11 +48,17 @@ public Database filterDatabase(Database dataBase) throws NoSuchObjectException { } @Override - public List filterTableNames(String dbName, List tableList) throws MetaException { + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { return tableList; } @Override + public List filterTableMetas(List tableMetas) throws MetaException { + return tableMetas; + } + + @Override public Table filterTable(Table table) throws NoSuchObjectException { return table; } @@ -78,7 +85,7 @@ public Partition filterPartition(Partition partition) throws NoSuchObjectExcept } @Override - public List filterPartitionNames(String dbName, String tblName, + public List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException { return partitionNames; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 04828e521f..ed1b8c5cc2 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -61,6 +61,10 @@ import java.util.Map; import java.util.Map.Entry; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + /** * Hive specific implementation of alter */ @@ -85,16 +89,10 @@ public void setConf(Configuration conf) { } @Override - public void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newt, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException { - alterTable(msdb, wh, dbname, name, newt, environmentContext, null); - } - - @Override - public void alterTable(RawStore msdb, Warehouse wh, String dbname, + public void 
alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newt, EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, MetaException { + catName = normalizeIdentifier(catName); name = name.toLowerCase(); dbname = dbname.toLowerCase(); @@ -135,9 +133,15 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, boolean isPartitionedTable = false; List parts; + // Switching tables between catalogs is not allowed. + if (!catName.equalsIgnoreCase(newt.getCatName())) { + throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog" + + catName + ", new catalog " + newt.getCatName()); + } + // check if table with the new name already exists if (!newTblName.equals(name) || !newDbName.equals(dbname)) { - if (msdb.getTable(newDbName, newTblName) != null) { + if (msdb.getTable(catName, newDbName, newTblName) != null) { throw new InvalidOperationException("new table " + newDbName + "." + newTblName + " already exists"); } @@ -146,9 +150,10 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, msdb.openTransaction(); // get old table - oldt = msdb.getTable(dbname, name); + oldt = msdb.getTable(catName, dbname, name); if (oldt == null) { - throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist"); + throw new InvalidOperationException("table " + + Warehouse.getCatalogQualifiedTableName(catName, dbname, name) + " doesn't exist"); } if (oldt.getPartitionKeysSize() != 0) { @@ -188,7 +193,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 || StringUtils.isEmpty(newt.getSd().getLocation())) && !MetaStoreUtils.isExternalTable(oldt)) { - Database olddb = msdb.getDatabase(dbname); + Database olddb = msdb.getDatabase(catName, dbname); // if a table was created in a user specified location using the DDL like // create table tbl ... location ...., it should be treated like an external table // in the table rename, its data location should not be changed. We can check @@ -204,7 +209,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, srcFs = wh.getFs(srcPath); // get new location - Database db = msdb.getDatabase(newDbName); + Database db = msdb.getDatabase(catName, newDbName); Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath); destPath = new Path(databasePath, newTblName); destFs = wh.getFs(destPath); @@ -222,8 +227,9 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, try { if (destFs.exists(destPath)) { - throw new InvalidOperationException("New location for this table " - + newDbName + "." + newTblName + " already exists : " + destPath); + throw new InvalidOperationException("New location for this table " + + Warehouse.getCatalogQualifiedTableName(catName, newDbName, newTblName) + + " already exists : " + destPath); } // check that src exists and also checks permissions necessary, rename src to dest if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) { @@ -242,7 +248,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, String newTblLocPath = dataWasMoved ? 
destPath.toUri().getPath() : null; // also the location field in partition - parts = msdb.getPartitions(dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1); Map columnStatsNeedUpdated = new HashMap<>(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); @@ -254,13 +260,13 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, } part.setDbName(newDbName); part.setTableName(newTblName); - ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name, + ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), part.getSd().getCols(), oldt, part, null); if (colStats != null) { columnStatsNeedUpdated.put(part, colStats); } } - msdb.alterTable(dbname, name, newt); + msdb.alterTable(catName, dbname, name, newt); // alterPartition is only for changing the partition location in the table rename if (dataWasMoved) { @@ -278,7 +284,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, for (Partition part : partBatch) { partValues.add(part.getValues()); } - msdb.alterPartitions(newDbName, newTblName, partValues, partBatch); + msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch); } } @@ -295,7 +301,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, // operations other than table rename if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) && !isPartitionedTable) { - Database db = msdb.getDatabase(newDbName); + Database db = msdb.getDatabase(catName, newDbName); // Update table stats. For partitioned table, we update stats in alterPartition() MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext, false); } @@ -303,23 +309,23 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if (isPartitionedTable) { //Currently only column related changes can be cascaded in alter table if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) { - parts = msdb.getPartitions(dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1); for (Partition part : parts) { Partition oldPart = new Partition(part); List oldCols = part.getSd().getCols(); part.getSd().setCols(newt.getSd().getCols()); - ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name, + ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), oldCols, oldt, part, null); assert(colStats == null); if (cascade) { - msdb.alterPartition(dbname, name, part.getValues(), part); + msdb.alterPartition(catName, dbname, name, part.getValues(), part); } else { // update changed properties (stats) oldPart.setParameters(part.getParameters()); - msdb.alterPartition(dbname, name, part.getValues(), oldPart); + msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart); } } - msdb.alterTable(dbname, name, newt); + msdb.alterTable(catName, dbname, name, newt); } else { LOG.warn("Alter table not cascaded to partitions."); alterTableUpdateTableColumnStats(msdb, oldt, newt); @@ -345,7 +351,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, new CreateTableEvent(newt, true, handler), environmentContext); if (isPartitionedTable) { - parts = msdb.getPartitions(newt.getDbName(), newt.getTableName(), -1); + String cName = newt.isSetCatName() ? 
newt.getCatName() : DEFAULT_CATALOG_NAME; + parts = msdb.getPartitions(cName, newt.getDbName(), newt.getTableName(), -1); MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ADD_PARTITION, new AddPartitionEvent(newt, parts, true, handler), @@ -372,7 +379,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, + " Check metastore logs for detailed stack." + e.getMessage()); } finally { if (!success) { - LOG.error("Failed to alter table " + dbname + "." + name); + LOG.error("Failed to alter table " + + Warehouse.getCatalogQualifiedTableName(catName, dbname, name)); msdb.rollbackTransaction(); if (dataWasMoved) { try { @@ -413,13 +421,15 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String final String name, final List part_vals, final Partition new_part, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { - return alterPartition(msdb, wh, dbname, name, part_vals, new_part, environmentContext, null); + return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part, + environmentContext, null); } @Override - public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part, - EnvironmentContext environmentContext, IHMSHandler handler) + public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName, + final String dbname, final String name, + final List part_vals, final Partition new_part, + EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { boolean success = false; Partition oldPart; @@ -436,18 +446,17 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String .currentTimeMillis() / 1000)); } - //alter partition if (part_vals == null || part_vals.size() == 0) { try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } - oldPart = msdb.getPartition(dbname, name, new_part.getValues()); + oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues()); if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { // if stats are same, no need to update if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) { @@ -460,10 +469,10 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String // PartitionView does not have SD. 
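Before the rename branch below, one idiom that shows up in almost every hunk of this patch is worth naming once: Thrift objects written by pre-catalog clients arrive with catName unset, so the code falls back to the default catalog rather than failing. A standalone sketch (the helper class and method are hypothetical, not part of the patch):

    import org.apache.hadoop.hive.metastore.api.Table;
    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    final class CatalogFallback {
      // Mirrors the recurring `t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME`
      // ternary seen in the surrounding hunks.
      static String catalogOf(Table t) {
        return t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME;
      }
    }

HiveAlterHandler also consults getDefaultCatalog(conf) in places, which resolves the configured default instead of hard-coding the constant; both spellings appear in this patch.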
We do not need to update its column stats if (oldPart.getSd() != null) { - updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(), + updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); } - msdb.alterPartition(dbname, name, new_part.getValues(), new_part); + msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part); if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, @@ -496,13 +505,13 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String boolean dataWasMoved = false; try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } try { - oldPart = msdb.getPartition(dbname, name, part_vals); + oldPart = msdb.getPartition(catName, dbname, name, part_vals); } catch (NoSuchObjectException e) { // this means there is no existing partition throw new InvalidObjectException( @@ -511,7 +520,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String Partition check_part; try { - check_part = msdb.getPartition(dbname, name, new_part.getValues()); + check_part = msdb.getPartition(catName, dbname, name, new_part.getValues()); } catch(NoSuchObjectException e) { // this means there is no existing partition check_part = null; @@ -530,7 +539,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String try { // if tbl location is available use it // else derive the tbl location from database location - destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues()); + destPath = wh.getPartitionPath(msdb.getDatabase(catName, dbname), tbl, new_part.getValues()); destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation())); } catch (NoSuchObjectException e) { LOG.debug("Didn't find object in metastore ", e); @@ -593,9 +602,9 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String } String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); - ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, dbname, name, oldPart.getValues(), + ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); - msdb.alterPartition(dbname, name, part_vals, new_part); + msdb.alterPartition(catName, dbname, name, part_vals, new_part); if (cs != null) { cs.getStatsDesc().setPartName(newPartName); try { @@ -643,13 +652,15 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String final String name, final List new_parts, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { - return alterPartitions(msdb, wh, dbname, name, new_parts, environmentContext, null); + return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts, + environmentContext, null); } @Override - public List alterPartitions(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List new_parts, EnvironmentContext environmentContext, - IHMSHandler handler) + public List alterPartitions(final RawStore msdb, Warehouse
wh, final String catName, + final String dbname, final String name, + final List new_parts, + EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { List oldParts = new ArrayList<>(); List> partValsList = new ArrayList<>(); @@ -658,12 +669,11 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String transactionalListeners = handler.getTransactionalListeners(); } - boolean success = false; try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partitions because table or database does not exist."); @@ -677,7 +687,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String .currentTimeMillis() / 1000)); } - Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues()); + Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues()); oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); @@ -693,12 +703,12 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String // PartitionView does not have SD and we do not need to update its column stats if (oldTmpPart.getSd() != null) { - updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(), + updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(), oldTmpPart.getSd().getCols(), tbl, tmpPart, null); } } - msdb.alterPartitions(dbname, name, partValsList, new_parts); + msdb.alterPartitions(catName, dbname, name, partValsList, new_parts); Iterator oldPartsIt = oldParts.iterator(); for (Partition newPart : new_parts) { Partition oldPart; @@ -768,10 +778,12 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { @VisibleForTesting void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable) throws MetaException, InvalidObjectException { + String catName = normalizeIdentifier(oldTable.isSetCatName() ? 
oldTable.getCatName() : + getDefaultCatalog(conf)); String dbName = oldTable.getDbName().toLowerCase(); - String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName()); + String tableName = normalizeIdentifier(oldTable.getTableName()); String newDbName = newTable.getDbName().toLowerCase(); - String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName()); + String newTableName = normalizeIdentifier(newTable.getTableName()); try { List oldCols = oldTable.getSd().getCols(); @@ -794,7 +806,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } // Collect column stats which need to be rewritten and remove old stats - colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames); + colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); if (colStats == null) { updateColumnStats = false; } else { @@ -813,12 +825,12 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa if (found) { if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) { - msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); newStatsObjs.add(statsObj); deletedCols.add(statsObj.getColName()); } } else { - msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); deletedCols.add(statsObj.getColName()); } } @@ -828,7 +840,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } // Change to new table and append stats for the new table - msdb.alterTable(dbName, tableName, newTable); + msdb.alterTable(catName, dbName, tableName, newTable); if (updateColumnStats && !newStatsObjs.isEmpty()) { ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); statsDesc.setDbName(newDbName); @@ -845,7 +857,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } private ColumnStatistics updateOrGetPartitionColumnStats( - RawStore msdb, String dbname, String tblname, List partVals, + RawStore msdb, String catName, String dbname, String tblname, List partVals, List oldCols, Table table, Partition part, List newCols) throws MetaException, InvalidObjectException { ColumnStatistics newPartsColStats = null; @@ -868,7 +880,7 @@ private ColumnStatistics updateOrGetPartitionColumnStats( oldColNames.add(oldCol.getName()); } List oldPartNames = Lists.newArrayList(oldPartName); - List partsColStats = msdb.getPartitionColumnStatistics(dbname, tblname, + List partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, oldPartNames, oldColNames); assert (partsColStats.size() <= 1); for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop @@ -886,12 +898,12 @@ private ColumnStatistics updateOrGetPartitionColumnStats( } if (found) { if (rename) { - msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(), + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), partVals, statsObj.getColName()); newStatsObjs.add(statsObj); } } else { - msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(), + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), 
partVals, statsObj.getColName()); deletedCols.add(statsObj.getColName()); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 519e8fefac..3dc5c22e6b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -20,6 +20,14 @@ import static org.apache.commons.lang.StringUtils.join; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName; import java.io.IOException; import java.net.InetAddress; @@ -85,11 +93,13 @@ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; @@ -106,10 +116,12 @@ import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; +import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent; import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; @@ -117,6 +129,7 @@ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent; import 
org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; @@ -161,6 +174,7 @@ import org.apache.thrift.transport.TServerSocket; import org.apache.thrift.transport.TTransport; import org.apache.thrift.transport.TTransportFactory; +import org.iq80.leveldb.DB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -671,21 +685,43 @@ public TxnStore getTxnHandler() { return txn; } - private static RawStore newRawStoreForConf(Configuration conf) throws MetaException { + static RawStore newRawStoreForConf(Configuration conf) throws MetaException { Configuration newConf = new Configuration(conf); String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL); LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName)); return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get()); } + @VisibleForTesting + public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException, + InvalidOperationException { + try { + Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME); + // Null check because in some test cases we get a null from ms.getCatalog. + if (defaultCat !=null && defaultCat.getLocationUri().equals("TBD")) { + // One time update issue. When the new 'hive' catalog is created in an upgrade the + // script does not know the location of the warehouse. So we need to update it. + LOG.info("Setting location of default catalog, as it hasn't been done after upgrade"); + defaultCat.setLocationUri(wh.getWhRoot().toString()); + ms.alterCatalog(defaultCat.getName(), defaultCat); + } + + } catch (NoSuchObjectException e) { + Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString()); + cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT); + ms.createCatalog(cat); + } + } + private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { try { - ms.getDatabase(DEFAULT_DATABASE_NAME); + ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME); } catch (NoSuchObjectException e) { Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null); db.setOwnerName(PUBLIC); db.setOwnerType(PrincipalType.ROLE); + db.setCatalogName(DEFAULT_CATALOG_NAME); ms.createDatabase(db); } } @@ -702,7 +738,9 @@ private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObje */ private void createDefaultDB() throws MetaException { try { - createDefaultDB_core(getMS()); + RawStore ms = getMS(); + createDefaultCatalog(ms, wh); + createDefaultDB_core(ms); } catch (JDOException e) { LOG.warn("Retrying creating default database after error: " + e.getMessage(), e); try { @@ -710,7 +748,7 @@ private void createDefaultDB() throws MetaException { } catch (InvalidObjectException e1) { throw new MetaException(e1.getMessage()); } - } catch (InvalidObjectException e) { + } catch (InvalidObjectException|InvalidOperationException e) { throw new MetaException(e.getMessage()); } } @@ -853,8 +891,9 @@ private String startFunction(String function) { return startFunction(function, ""); } - private void startTableFunction(String function, String db, String tbl) { - startFunction(function, " : db=" + db + " tbl=" + tbl); + private void startTableFunction(String function, String catName, String db, String tbl) { + startFunction(function, " : tbl=" + + 
getCatalogQualifiedTableName(catName, db, tbl)); } private void startMultiTableFunction(String function, String db, List tbls) { @@ -862,14 +901,16 @@ private void startMultiTableFunction(String function, String db, List tb startFunction(function, " : db=" + db + " tbls=" + tableNames); } - private void startPartitionFunction(String function, String db, String tbl, + private void startPartitionFunction(String function, String cat, String db, String tbl, List partVals) { - startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]"); + startFunction(function, " : tbl=" + + getCatalogQualifiedTableName(cat, db, tbl) + "[" + join(partVals, ",") + "]"); } - private void startPartitionFunction(String function, String db, String tbl, + private void startPartitionFunction(String function, String catName, String db, String tbl, Map partName) { - startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); + startFunction(function, " : tbl=" + + getCatalogQualifiedTableName(catName, db, tbl) + "partition=" + partName); } private void endFunction(String function, boolean successful, Exception e) { @@ -920,25 +961,234 @@ public void shutdown() { return counters; } + @Override + public void create_catalog(CreateCatalogRequest rqst) + throws AlreadyExistsException, InvalidObjectException, MetaException { + Catalog catalog = rqst.getCatalog(); + startFunction("create_catalog", ": " + catalog.toString()); + boolean success = false; + Exception ex = null; + try { + try { + getMS().getCatalog(catalog.getName()); + throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists"); + } catch (NoSuchObjectException e) { + // expected + } + + if (!MetaStoreUtils.validateName(catalog.getName(), null)) { + throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name"); + } + + if (catalog.getLocationUri() == null) { + throw new InvalidObjectException("You must specify a path for the catalog"); + } + + RawStore ms = getMS(); + Path catPath = new Path(catalog.getLocationUri()); + boolean madeDir = false; + Map transactionalListenersResponses = Collections.emptyMap(); + try { + firePreEvent(new PreCreateCatalogEvent(this, catalog)); + if (!wh.isDir(catPath)) { + if (!wh.mkdirs(catPath)) { + throw new MetaException("Unable to create catalog path " + catPath + + ", failed to create catalog " + catalog.getName()); + } + madeDir = true; + } + + ms.openTransaction(); + ms.createCatalog(catalog); + + // Create a default database inside the catalog + Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " + + catalog.getName(), catalog.getLocationUri(), Collections.emptyMap()); + db.setCatalogName(catalog.getName()); + create_database_core(ms, db); + + if (!transactionalListeners.isEmpty()) { + transactionalListenersResponses = + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.CREATE_CATALOG, + new CreateCatalogEvent(true, this, catalog)); + } + + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(catPath, true); + } + } + + if (!listeners.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, + EventType.CREATE_CATALOG, + new CreateCatalogEvent(success, this, catalog), + null, + transactionalListenersResponses, ms); + } + } + success = true; + } catch (AlreadyExistsException|InvalidObjectException|MetaException e) { + ex = e; + throw e; + } finally { + endFunction("create_catalog", success, ex); + } 
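create_catalog above validates the name, materializes the catalog directory, persists the catalog, and creates a default database inside it, firing transactional listeners inside the open transaction and the non-transactional ones afterward. A client's-eye sketch of the new call sequence follows; the handler parameter and the single-argument request constructors are assumptions based on the Thrift structs used above, not code from this patch:

    // Sketch: driving the new catalog API end to end. `handler` stands for a
    // ThriftHiveMetastore.Iface reference (remote client or embedded handler).
    void catalogRoundTrip(ThriftHiveMetastore.Iface handler) throws Exception {
      Catalog cat = new Catalog("etl_cat", "hdfs://nn:8020/warehouse/etl_cat");
      cat.setDescription("catalog for the ETL pipelines");
      handler.create_catalog(new CreateCatalogRequest(cat));

      // Read it back; the response wraps the stored Catalog.
      Catalog stored = handler.get_catalog(new GetCatalogRequest("etl_cat")).getCatalog();

      // Succeeds only while the catalog holds nothing but an empty default
      // database (see dropCatalogCore in the next hunk).
      handler.drop_catalog(new DropCatalogRequest("etl_cat"));
    }

Note the ordering in drop_catalog's core below: the catalog's directory is removed only after the metastore transaction commits, so a failed commit leaves the data in place.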
+ } + + @Override + public GetCatalogResponse get_catalog(GetCatalogRequest rqst) + throws NoSuchObjectException, TException { + String catName = rqst.getName(); + startFunction("get_catalog", ": " + catName); + Catalog cat = null; + Exception ex = null; + try { + cat = getMS().getCatalog(catName); + firePreEvent(new PreReadCatalogEvent(this, cat)); + return new GetCatalogResponse(cat); + } catch (MetaException|NoSuchObjectException e) { + ex = e; + throw e; + } finally { + endFunction("get_catalog", cat != null, ex); + } + } + + @Override + public GetCatalogsResponse get_catalogs() throws MetaException { + startFunction("get_catalogs"); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getCatalogs(); + } catch (MetaException e) { + ex = e; + throw e; + } finally { + endFunction("get_catalogs", ret != null, ex); + } + return new GetCatalogsResponse(ret == null ? Collections.emptyList() : ret); + + } + + @Override + public void drop_catalog(DropCatalogRequest rqst) + throws NoSuchObjectException, InvalidOperationException, MetaException { + String catName = rqst.getName(); + startFunction("drop_catalog", ": " + catName); + if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName)) { + endFunction("drop_catalog", false, null); + throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog"); + } + + boolean success = false; + Exception ex = null; + try { + dropCatalogCore(catName); + success = true; + } catch (NoSuchObjectException|InvalidOperationException|MetaException e) { + ex = e; + throw e; + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("drop_catalog", success, ex); + } + + } + + private void dropCatalogCore(String catName) + throws MetaException, NoSuchObjectException, InvalidOperationException { + boolean success = false; + Catalog cat = null; + Map transactionalListenerResponses = Collections.emptyMap(); + RawStore ms = getMS(); + try { + ms.openTransaction(); + cat = ms.getCatalog(catName); + + firePreEvent(new PreDropCatalogEvent(this, cat)); + + List allDbs = get_databases(prependNotNullCatToDbName(catName, null)); + if (allDbs != null && !allDbs.isEmpty()) { + // It might just be the default, in which case we can drop that one if it's empty + if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) { + try { + drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false); + } catch (InvalidOperationException e) { + // This means there are tables or something else in the database + throw new InvalidOperationException("There are still objects in the default " + + "database for catalog " + catName); + } catch (InvalidObjectException|IOException|InvalidInputException e) { + MetaException me = new MetaException("Error attempting to drop default database for " + + "catalog " + catName); + me.initCause(e); + throw me; + } + } else { + throw new InvalidOperationException("There are non-default databases in the catalog " + + catName + " so it cannot be dropped."); + } + } + + ms.dropCatalog(catName); + if (!transactionalListeners.isEmpty()) { + transactionalListenerResponses = + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.DROP_CATALOG, + new DropCatalogEvent(true, this, cat)); + } + + success = ms.commitTransaction(); + } finally { + if (success) { + wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false); + } else { + ms.rollbackTransaction(); + } + + if (!listeners.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, + EventType.DROP_CATALOG, + new
DropCatalogEvent(success, this, cat), + null, + transactionalListenerResponses, ms); + } + } + } + + + // Assumes that the catalog has already been set. private void create_database_core(RawStore ms, final Database db) throws AlreadyExistsException, InvalidObjectException, MetaException { if (!MetaStoreUtils.validateName(db.getName(), null)) { throw new InvalidObjectException(db.getName() + " is not a valid database name"); } - if (null == db.getLocationUri()) { - db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString()); - } else { - db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString()); + Catalog cat = null; + try { + cat = getMS().getCatalog(db.getCatalogName()); + } catch (NoSuchObjectException e) { + LOG.error("No such catalog " + db.getCatalogName()); + throw new InvalidObjectException("No such catalog " + db.getCatalogName()); } + Path dbPath = wh.determineDatabasePath(cat, db); + db.setLocationUri(dbPath.toString()); - Path dbPath = new Path(db.getLocationUri()); boolean success = false; boolean madeDir = false; Map transactionalListenersResponses = Collections.emptyMap(); try { firePreEvent(new PreCreateDatabaseEvent(db, this)); if (!wh.isDir(dbPath)) { + LOG.debug("Creating database path " + dbPath); if (!wh.mkdirs(dbPath)) { throw new MetaException("Unable to create database path " + dbPath + ", failed to create database " + db.getName()); @@ -981,9 +1231,10 @@ public void create_database(final Database db) startFunction("create_database", ": " + db.toString()); boolean success = false; Exception ex = null; + if (!db.isSetCatalogName()) db.setCatalogName(getDefaultCatalog(conf)); try { try { - if (null != get_database_core(db.getName())) { + if (null != get_database_core(db.getCatalogName(), db.getName())) { throw new AlreadyExistsException("Database " + db.getName() + " already exists"); } } catch (NoSuchObjectException e) { @@ -1022,7 +1273,8 @@ public Database get_database(final String name) throws NoSuchObjectException, Me Database db = null; Exception ex = null; try { - db = get_database_core(name); + String[] parsedDbName = parseDbName(name, conf); + db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); firePreEvent(new PreReadDatabaseEvent(db, this)); } catch (MetaException|NoSuchObjectException e) { ex = e; @@ -1034,11 +1286,10 @@ public Database get_database(final String name) throws NoSuchObjectException, Me } @Override - public Database get_database_core(final String name) throws NoSuchObjectException, - MetaException { + public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException { Database db = null; try { - db = getMS().getDatabase(name); + db = getMS().getDatabase(catName, name); } catch (MetaException | NoSuchObjectException e) { throw e; } catch (Exception e) { @@ -1050,7 +1301,7 @@ public Database get_database_core(final String name) throws NoSuchObjectExceptio @Override public void alter_database(final String dbName, final Database newDB) throws TException { - startFunction("alter_database" + dbName); + startFunction("alter_database " + dbName); boolean success = false; Exception ex = null; RawStore ms = getMS(); @@ -1062,27 +1313,30 @@ public void alter_database(final String dbName, final Database newDB) throws TEx newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString()); } + String[] parsedDbName = parseDbName(dbName, conf); + try { - oldDB = get_database_core(dbName); + oldDB = get_database_core(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME]); if (oldDB == null) { - throw new MetaException("Could not alter database \"" + dbName + "\". Could not retrieve old definition."); + throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] + + "\". Could not retrieve old definition."); } firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this)); ms.openTransaction(); - ms.alterDatabase(dbName, newDB); + ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB); if (!transactionalListeners.isEmpty()) { transactionalListenersResponses = - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.ALTER_DATABASE, - new AlterDatabaseEvent(oldDB, newDB, true, this)); + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.ALTER_DATABASE, + new AlterDatabaseEvent(oldDB, newDB, true, this)); } success = ms.commitTransaction(); - } catch (Exception e) { + } catch (MetaException|NoSuchObjectException e) { ex = e; - rethrowException(e); + throw e; } finally { if (!success) { ms.rollbackTransaction(); @@ -1090,16 +1344,16 @@ public void alter_database(final String dbName, final Database newDB) throws TEx if ((null != oldDB) && (!listeners.isEmpty())) { MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.ALTER_DATABASE, - new AlterDatabaseEvent(oldDB, newDB, success, this), - null, - transactionalListenersResponses, ms); + EventType.ALTER_DATABASE, + new AlterDatabaseEvent(oldDB, newDB, success, this), + null, + transactionalListenersResponses, ms); } endFunction("alter_database", success, ex); } } - private void drop_database_core(RawStore ms, + private void drop_database_core(RawStore ms, String catName, final String name, final boolean deleteData, final boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, IOException, InvalidObjectException, InvalidInputException { @@ -1110,12 +1364,13 @@ private void drop_database_core(RawStore ms, Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); - db = ms.getDatabase(name); + db = ms.getDatabase(catName, name); firePreEvent(new PreDropDatabaseEvent(db, this)); + String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf); - List allTables = get_all_tables(db.getName()); - List allFunctions = get_functions(db.getName(), "*"); + List allTables = get_all_tables(catPrependedName); + List allFunctions = get_functions(catPrependedName, "*"); if (!cascade) { if (!allTables.isEmpty()) { @@ -1138,7 +1393,7 @@ private void drop_database_core(RawStore ms, // drop any functions before dropping db for (String funcName : allFunctions) { - drop_function(name, funcName); + drop_function(catPrependedName, funcName); } // drop tables before dropping db @@ -1152,7 +1407,7 @@ private void drop_database_core(RawStore ms, List
tables; try { - tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex)); + tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex)); } catch (UnknownDBException e) { throw new MetaException(e.getMessage()); } @@ -1178,18 +1433,19 @@ private void drop_database_core(RawStore ms, // For each partition in each table, drop the partitions and get a list of // partitions' locations which might need to be deleted - partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), + partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); // Drop the table but not its data - drop_table(name, table.getTableName(), false); + drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf), + table.getTableName(), false); } startIndex = endIndex; } } - if (ms.dropDatabase(name)) { + if (ms.dropDatabase(catName, name)) { if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, @@ -1246,45 +1502,46 @@ private boolean isSubdirectory(Path parent, Path other) { @Override public void drop_database(final String dbName, final boolean deleteData, final boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException { - startFunction("drop_database", ": " + dbName); - if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) { + String[] parsedDbName = parseDbName(dbName, conf); + if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) && + DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) { endFunction("drop_database", false, null); - throw new MetaException("Can not drop default database"); + throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog " + + DEFAULT_CATALOG_NAME); } boolean success = false; Exception ex = null; try { - drop_database_core(getMS(), dbName, deleteData, cascade); + drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData, + cascade); success = true; - } catch (IOException e) { + } catch (NoSuchObjectException|InvalidOperationException|MetaException e) { ex = e; - throw new MetaException(e.getMessage()); + throw e; } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof InvalidOperationException) { - throw (InvalidOperationException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throw newMetaException(e); } finally { endFunction("drop_database", success, ex); } } + @Override public List get_databases(final String pattern) throws MetaException { startFunction("get_databases", ": " + pattern); + String[] parsedDbNamed = parseDbName(pattern, conf); List ret = null; Exception ex = null; try { - ret = getMS().getDatabases(pattern); + if (parsedDbNamed[DB_NAME] == null) { + ret = getMS().getAllDatabases(parsedDbNamed[CAT_NAME]); + } else { + ret = getMS().getDatabases(parsedDbNamed[CAT_NAME], parsedDbNamed[DB_NAME]); + } } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1298,25 +1555,10 @@ public void drop_database(final String dbName, final boolean deleteData, final b return ret; } + @Override public List get_all_databases() throws MetaException { - startFunction("get_all_databases"); - - List ret = null; - Exception ex = null; - try { - ret = 
getMS().getAllDatabases(); - } catch (Exception e) { - ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else { - throw newMetaException(e); - } - } finally { - endFunction("get_all_databases", ret != null, ex); - } - return ret; + return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf)); } private void create_type_core(final RawStore ms, final Type type) @@ -1467,14 +1709,16 @@ private void create_table_core(final RawStore ms, final Table tbl, ms.openTransaction(); - Database db = ms.getDatabase(tbl.getDbName()); + if (!tbl.isSetCatName()) tbl.setCatName(getDefaultCatalog(conf)); + Database db = ms.getDatabase(tbl.getCatName(), tbl.getDbName()); if (db == null) { - throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist"); + throw new NoSuchObjectException("The database " + + Warehouse.getCatalogQualifiedDbName(tbl.getCatName(), tbl.getDbName()) + " does not exist"); } // get_table checks whether database exists, it should be moved here - if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) { - throw new AlreadyExistsException("Table " + tbl.getTableName() + if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) { + throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl) + " already exists"); } @@ -1482,7 +1726,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) { tblPath = wh.getDefaultTablePath( - ms.getDatabase(tbl.getDbName()), tbl.getTableName()); + ms.getDatabase(tbl.getCatName(), tbl.getDbName()), tbl.getTableName()); } else { if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) { LOG.warn("Location: " + tbl.getSd().getLocation() @@ -1550,7 +1794,7 @@ private void create_table_core(final RawStore ms, final Table tbl, } } } - int notNullConstraintSize = notNullConstraints.size(); + int notNullConstraintSize = 0; if (notNullConstraints != null) { for (int i = 0; i < notNullConstraints.size(); i++) { if (notNullConstraints.get(i).getNn_name() == null) { @@ -1558,7 +1802,7 @@ private void create_table_core(final RawStore ms, final Table tbl, } } } - int defaultConstraintSize = defaultConstraints.size(); + int defaultConstraintSize = 0; if (defaultConstraints!= null) { for (int i = 0; i < defaultConstraints.size(); i++) { if (defaultConstraints.get(i).getDc_name() == null) { @@ -1649,6 +1893,7 @@ public void create_table_with_environment_context(final Table tbl, create_table_core(getMS(), tbl, envContext); success = true; } catch (NoSuchObjectException e) { + LOG.warn("create_table_with_environment_context got ", e); ex = e; throw new InvalidObjectException(e.getMessage()); } catch (Exception e) { @@ -1704,6 +1949,7 @@ public void create_table_with_constraints(final Table tbl, @Override public void drop_constraint(DropConstraintRequest req) throws MetaException, InvalidObjectException { + String catName = req.isSetCatName() ? 
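A note on how the older Thrift methods carry the catalog: signatures like get_databases(pattern) and drop_table(dbname, ...) cannot grow a catName argument, so the patch encodes the catalog into the database-name string. prependCatalogToDbName builds the encoded form and parseDbName splits it back into a two-slot array indexed by CAT_NAME and DB_NAME, as in get_all_databases above. A round-trip sketch follows; the "@cat#db" shape shown in the comment is my reading of the marker characters in MetaStoreUtils and should be treated as an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.*;

    class DbNameEncodingDemo {
      public static void main(String[] args) throws MetaException {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // Likely "@etl_cat#web_logs" on the wire (marker chars assumed).
        String encoded = prependCatalogToDbName("etl_cat", "web_logs", conf);
        String[] parts = parseDbName(encoded, conf);
        assert parts[CAT_NAME].equals("etl_cat");
        assert parts[DB_NAME].equals("web_logs");
        // A plain "web_logs" parses to the default catalog plus "web_logs".
      }
    }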
req.getCatName() : getDefaultCatalog(conf); String dbName = req.getDbname(); String tableName = req.getTablename(); String constraintName = req.getConstraintname(); @@ -1713,9 +1959,9 @@ public void drop_constraint(DropConstraintRequest req) RawStore ms = getMS(); try { ms.openTransaction(); - ms.dropConstraint(dbName, tableName, constraintName); + ms.dropConstraint(catName, dbName, tableName, constraintName); if (transactionalListeners.size() > 0) { - DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName, tableName, constraintName, true, this); for (MetaStoreEventListener transactionalListener : transactionalListeners) { transactionalListener.onDropConstraint(dropConstraintEvent); @@ -1737,7 +1983,7 @@ public void drop_constraint(DropConstraintRequest req) ms.rollbackTransaction(); } else { for (MetaStoreEventListener listener : listeners) { - DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName, tableName, constraintName, true, this); listener.onDropConstraint(dropConstraintEvent); } @@ -2059,15 +2305,16 @@ public void add_check_constraint(AddCheckConstraintRequest req) } } - private boolean is_table_exists(RawStore ms, String dbname, String name) + private boolean is_table_exists(RawStore ms, String catName, String dbname, String name) throws MetaException { - return (ms.getTable(dbname, name) != null); + return (ms.getTable(catName, dbname, name) != null); } - private boolean drop_table_core(final RawStore ms, final String dbname, final String name, - final boolean deleteData, final EnvironmentContext envContext, - final String indexName) throws NoSuchObjectException, - MetaException, IOException, InvalidObjectException, InvalidInputException { + private boolean drop_table_core(final RawStore ms, final String catName, final String dbname, + final String name, final boolean deleteData, + final EnvironmentContext envContext, final String indexName) + throws NoSuchObjectException, MetaException, IOException, InvalidObjectException, + InvalidInputException { boolean success = false; boolean isExternal = false; Path tblPath = null; @@ -2078,7 +2325,7 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St try { ms.openTransaction(); // drop any partitions - tbl = get_table_core(dbname, name); + tbl = get_table_core(catName, dbname, name); if (tbl == null) { throw new NoSuchObjectException(name + " doesn't exist"); } @@ -2101,10 +2348,14 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St } // Drop the partitions and get a list of locations which need to be deleted - partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath, + partPaths = dropPartitionsAndGetLocations(ms, catName, dbname, name, tblPath, tbl.getPartitionKeys(), deleteData && !isExternal); - if (!ms.dropTable(dbname, name)) { - String tableName = dbname + "." + name; + + // Drop any constraints on the table + ms.dropConstraint(catName, dbname, name, null, true); + + if (!ms.dropTable(catName, dbname, name)) { + String tableName = getCatalogQualifiedTableName(catName, dbname, name); throw new MetaException(indexName == null ? 
"Unable to drop table " + tableName: "Unable to drop index table " + tableName + " for index " + indexName); } else { @@ -2221,7 +2472,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { * @throws InvalidObjectException * @throws NoSuchObjectException */ - private List dropPartitionsAndGetLocations(RawStore ms, String dbName, + private List dropPartitionsAndGetLocations(RawStore ms, String catName, String dbName, String tableName, Path tablePath, List partitionKeys, boolean checkLocation) throws MetaException, IOException, NoSuchObjectException, InvalidObjectException, InvalidInputException { @@ -2232,12 +2483,12 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { tableDnsPath = wh.getDnsPath(tablePath); } List partPaths = new ArrayList<>(); - Table tbl = ms.getTable(dbName, tableName); + Table tbl = ms.getTable(catName, dbName, tableName); // call dropPartition on each of the table's partitions to follow the // procedure for cleanly dropping partitions. while (true) { - List partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize); + List partsToDelete = ms.getPartitions(catName, dbName, tableName, partitionBatchSize); if (partsToDelete == null || partsToDelete.isEmpty()) { break; } @@ -2269,7 +2520,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { } } } - ms.dropPartitions(dbName, tableName, partNames); + ms.dropPartitions(catName, dbName, tableName, partNames); } return partPaths; @@ -2285,12 +2536,14 @@ public void drop_table(final String dbname, final String name, final boolean del public void drop_table_with_environment_context(final String dbname, final String name, final boolean deleteData, final EnvironmentContext envContext) throws NoSuchObjectException, MetaException { - startTableFunction("drop_table", dbname, name); + String[] parsedDbName = parseDbName(dbname, conf); + startTableFunction("drop_table", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name); boolean success = false; Exception ex = null; try { - success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null); + success = drop_table_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, + deleteData, envContext, null); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); @@ -2323,6 +2576,7 @@ private void updateStatsForTruncate(Map props, EnvironmentContext } private void alterPartitionForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, @@ -2342,18 +2596,20 @@ private void alterPartitionForTruncate(final RawStore ms, new AlterPartitionEvent(partition, partition, table, true, true, this)); } - alterHandler.alterPartition(ms, wh, dbName, tableName, null, partition, environmentContext, this); + alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition, + environmentContext, this); } private void alterTableStatsForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, final List partNames) throws Exception { if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) { - alterPartitionForTruncate(ms, dbName, tableName, table, partition); + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { + alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition); } } else { EnvironmentContext environmentContext 
= new EnvironmentContext(); @@ -2371,17 +2627,18 @@ private void alterTableStatsForTruncate(final RawStore ms, new AlterTableEvent(table, table, true, true, this)); } - alterHandler.alterTable(ms, wh, dbName, tableName, table, environmentContext, this); + alterHandler.alterTable(ms, wh, catName, dbName, tableName, table, environmentContext, this); } } else { - for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) { - alterPartitionForTruncate(ms, dbName, tableName, table, partition); + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { + alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition); } } return; } private List getLocationsForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, @@ -2389,14 +2646,14 @@ private void alterTableStatsForTruncate(final RawStore ms, List locations = new ArrayList<>(); if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) { + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { locations.add(new Path(partition.getSd().getLocation())); } } else { locations.add(new Path(table.getSd().getLocation())); } } else { - for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) { + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { locations.add(new Path(partition.getSd().getLocation())); } } @@ -2413,11 +2670,13 @@ public CmRecycleResponse cm_recycle(final CmRecycleRequest request) throws MetaE public void truncate_table(final String dbName, final String tableName, List partNames) throws NoSuchObjectException, MetaException { try { - Table tbl = get_table_core(dbName, tableName); + String[] parsedDbName = parseDbName(dbName, conf); + Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge"))); // This is not transactional - for (Path location : getLocationsForTruncate(getMS(), dbName, tableName, tbl, partNames)) { + for (Path location : getLocationsForTruncate(getMS(), parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tableName, tbl, partNames)) { FileSystem fs = location.getFileSystem(getConf()); if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location) && !FileUtils.pathHasSnapshotSubDir(location, fs)) { @@ -2439,7 +2698,8 @@ public void truncate_table(final String dbName, final String tableName, List get_table_meta(String dbnames, String tblNames, List tblTypes) throws MetaException, NoSuchObjectException { List t = null; - startTableFunction("get_table_metas", dbnames, tblNames); + String[] parsedDbName = parseDbName(dbnames, conf); + startTableFunction("get_table_metas", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames); Exception ex = null; try { - t = getMS().getTableMeta(dbnames, tblNames, tblTypes); + t = getMS().getTableMeta(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames, tblTypes); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -2523,14 +2786,13 @@ private Table getTableInternal(String dbname, String name, } @Override - public Table get_table_core(final String dbname, final String name) throws MetaException, - NoSuchObjectException { + public Table get_table_core(final String catName, final 
String dbname, final String name) + throws MetaException, NoSuchObjectException { Table t = null; try { - t = getMS().getTable(dbname, name); + t = getMS().getTable(catName, dbname, name); if (t == null) { - throw new NoSuchObjectException(dbname + "." + name - + " table not found"); + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbname, name)); } } catch (Exception e) { throwMetaException(e); } @@ -2558,17 +2820,20 @@ public Table get_table_core(final String dbname, final String name) throws MetaE @Deprecated public List<Table> get_table_objects_by_name(final String dbName, final List<String> tableNames) throws MetaException, InvalidOperationException, UnknownDBException { - return getTableObjectsInternal(dbName, tableNames, null); + String[] parsedDbName = parseDbName(dbName, conf); + return getTableObjectsInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableNames, null); } @Override public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws TException { - return new GetTablesResult(getTableObjectsInternal( + String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); + return new GetTablesResult(getTableObjectsInternal(catName, req.getDbName(), req.getTblNames(), req.getCapabilities())); } - private List<Table> getTableObjectsInternal( - String dbName, List<String> tableNames, ClientCapabilities capabilities) + private List<Table>
getTableObjectsInternal(String catName, String dbName, + List tableNames, + ClientCapabilities capabilities) throws MetaException, InvalidOperationException, UnknownDBException { if (isInTest) { assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY, @@ -2606,7 +2871,8 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw // Oracle cannot have over 1000 expressions in a in-list while (startIndex < distinctTableNames.size()) { int endIndex = Math.min(startIndex + tableBatchSize, distinctTableNames.size()); - tables.addAll(ms.getTableObjectsByName(dbName, distinctTableNames.subList(startIndex, endIndex))); + tables.addAll(ms.getTableObjectsByName(catName, dbName, distinctTableNames.subList( + startIndex, endIndex))); startIndex = endIndex; } for (Table t : tables) { @@ -2638,8 +2904,8 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw } @Override - public void update_creation_metadata(final String dbName, final String tableName, CreationMetadata cm) throws MetaException { - getMS().updateCreationMetadata(dbName, tableName, cm); + public void update_creation_metadata(String catName, final String dbName, final String tableName, CreationMetadata cm) throws MetaException { + getMS().updateCreationMetadata(catName, dbName, tableName, cm); } private void assertClientHasCapability(ClientCapabilities client, @@ -2668,14 +2934,16 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi List tables = null; startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter); Exception ex = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - if (dbName == null || dbName.isEmpty()) { + if (parsedDbName[CAT_NAME] == null || parsedDbName[CAT_NAME].isEmpty() || + parsedDbName[DB_NAME] == null || parsedDbName[DB_NAME].isEmpty()) { throw new UnknownDBException("DB name is null or empty"); } if (filter == null) { throw new InvalidOperationException(filter + " cannot apply null filter"); } - tables = getMS().listTableNamesByFilter(dbName, filter, maxTables); + tables = getMS().listTableNamesByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], filter, maxTables); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -2693,9 +2961,10 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi return tables; } - private Partition append_partition_common(RawStore ms, String dbName, String tableName, - List part_vals, EnvironmentContext envContext) throws InvalidObjectException, - AlreadyExistsException, MetaException { + private Partition append_partition_common(RawStore ms, String catName, String dbName, + String tableName, List part_vals, + EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, MetaException { Partition part = new Partition(); boolean success = false, madeDir = false; @@ -2704,13 +2973,14 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); + part.setCatName(catName); part.setDbName(dbName); part.setTableName(tableName); part.setValues(part_vals); MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern); - tbl = ms.getTable(part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or 
database do not exist"); @@ -2729,7 +2999,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab Partition old_part; try { - old_part = ms.getPartition(part.getDbName(), part + old_part = ms.getPartition(part.getCatName(), part.getDbName(), part .getTableName(), part.getValues()); } catch (NoSuchObjectException e) { // this means there is no existing partition @@ -2810,7 +3080,12 @@ public Partition append_partition(final String dbName, final String tableName, public Partition append_partition_with_environment_context(final String dbName, final String tableName, final List part_vals, final EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException { - startPartitionFunction("append_partition", dbName, tableName, part_vals); + String[] parsedDbName = parseDbName(dbName, conf); + startPartitionFunction("append_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals); + // Make sure the part_vals are valid + if (part_vals == null || part_vals.isEmpty()) { + throw new InvalidObjectException("You must provide partition values"); + } if (LOG.isDebugEnabled()) { for (String part : part_vals) { LOG.debug(part); @@ -2820,7 +3095,7 @@ public Partition append_partition_with_environment_context(final String dbName, Partition ret = null; Exception ex = null; try { - ret = append_partition_common(getMS(), dbName, tableName, part_vals, envContext); + ret = append_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals, envContext); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -2927,7 +3202,7 @@ public boolean equals(Object obj) { } } - private List add_partitions_core(final RawStore ms, + private List add_partitions_core(final RawStore ms, String catName, String dbName, String tblName, List parts, final boolean ifNotExists) throws TException { logInfo("add_partitions"); @@ -2941,10 +3216,11 @@ public boolean equals(Object obj) { try { ms.openTransaction(); - tbl = ms.getTable(dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " - + "database or table " + dbName + "." + tblName + " does not exist"); + + getCatalogQualifiedTableName(catName, dbName, tblName) + + " does not exist"); } if (!parts.isEmpty()) { @@ -2955,8 +3231,9 @@ public boolean equals(Object obj) { final Table table = tbl; for (final Partition part : parts) { if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { - throw new MetaException("Partition does not belong to target table " - + dbName + "." 
+ tblName + ": " + part); + throw new MetaException("Partition does not belong to target table " + + getCatalogQualifiedTableName(catName, dbName, tblName) + ": " + + part); } boolean shouldAdd = startAddPartition(ms, part, ifNotExists); @@ -3015,7 +3292,7 @@ public Object run() throws Exception { } if (!newParts.isEmpty()) { - success = ms.addPartitions(dbName, tblName, newParts); + success = ms.addPartitions(catName, dbName, tblName, newParts); } else { success = true; } @@ -3077,7 +3354,12 @@ public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) return result; } try { - List parts = add_partitions_core(getMS(), request.getDbName(), + if (!request.isSetCatName()) request.setCatName(getDefaultCatalog(conf)); + // Make sure all of the partitions have the catalog set as well + request.getParts().forEach(p -> { + if (!p.isSetCatName()) p.setCatName(getDefaultCatalog(conf)); + }); + List parts = add_partitions_core(getMS(), request.getCatName(), request.getDbName(), request.getTblName(), request.getParts(), request.isIfNotExists()); if (request.isNeedResult()) { result.setPartitions(parts); @@ -3102,7 +3384,11 @@ public int add_partitions(final List parts) throws MetaException, Exception ex = null; try { // Old API assumed all partitions belong to the same table; keep the same assumption - ret = add_partitions_core(getMS(), parts.get(0).getDbName(), + if (!parts.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + for (Partition p : parts) p.setCatName(defaultCat); + } + ret = add_partitions_core(getMS(), parts.get(0).getCatName(), parts.get(0).getDbName(), parts.get(0).getTableName(), parts, false).size(); assert ret == parts.size(); } catch (Exception e) { @@ -3134,12 +3420,21 @@ public int add_partitions_pspec(final List partSpecs) String dbName = partSpecs.get(0).getDbName(); String tableName = partSpecs.get(0).getTableName(); + // If the catalog name isn't set, we need to go through and set it. + String catName; + if (!partSpecs.get(0).isSetCatName()) { + catName = getDefaultCatalog(conf); + partSpecs.forEach(ps -> ps.setCatName(catName)); + } else { + catName = partSpecs.get(0).getCatName(); + } - return add_partitions_pspec_core(getMS(), dbName, tableName, partSpecs, false); + return add_partitions_pspec_core(getMS(), catName, dbName, tableName, partSpecs, false); } - private int add_partitions_pspec_core( - RawStore ms, String dbName, String tblName, List partSpecs, boolean ifNotExists) + private int add_partitions_pspec_core(RawStore ms, String catName, String dbName, + String tblName, List partSpecs, + boolean ifNotExists) throws TException { boolean success = false; // Ensures that the list doesn't have dups, and keeps track of directories we have created. @@ -3151,7 +3446,7 @@ private int add_partitions_pspec_core( Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); - tbl = ms.getTable(dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " + "database or table " + dbName + "." + tblName + " does not exist"); @@ -3219,7 +3514,7 @@ public Partition run() throws Exception { throw new MetaException(e.getMessage()); } - success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists); + success = ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists); //setting success to false to make sure that if the listener fails, rollback happens. 
success = false; @@ -3257,7 +3552,7 @@ private boolean startAddPartition( RawStore ms, Partition part, boolean ifNotExists) throws TException { MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); - boolean doesExist = ms.doesPartitionExist( + boolean doesExist = ms.doesPartitionExist(part.getCatName(), part.getDbName(), part.getTableName(), part.getValues()); if (doesExist && !ifNotExists) { throw new AlreadyExistsException("Partition already exists: " + part); @@ -3355,9 +3650,10 @@ private Partition add_partition_core(final RawStore ms, boolean success = false; Table tbl = null; Map transactionalListenerResponses = Collections.emptyMap(); + if (!part.isSetCatName()) part.setCatName(getDefaultCatalog(conf)); try { ms.openTransaction(); - tbl = ms.getTable(part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or database do not exist"); @@ -3421,7 +3717,7 @@ public Partition add_partition_with_environment_context( throws InvalidObjectException, AlreadyExistsException, MetaException { startTableFunction("add_partition", - part.getDbName(), part.getTableName()); + part.getCatName(), part.getDbName(), part.getTableName()); Partition ret = null; Exception ex = null; try { @@ -3448,6 +3744,8 @@ public Partition exchange_partition(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, String destTableName) throws TException { exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName); + // Wouldn't it make more sense to return the first element of the list returned by the + // previous call? return new Partition(); } @@ -3455,24 +3753,36 @@ public Partition exchange_partition(Map partitionSpecs, public List exchange_partitions(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, String destTableName) throws TException { - if (partitionSpecs == null || sourceDbName == null || sourceTableName == null - || destDbName == null || destTableName == null) { + String[] parsedDestDbName = parseDbName(destDbName, conf); + String[] parsedSourceDbName = parseDbName(sourceDbName, conf); + // No need to check catalog for null as parseDbName() will never return null for the catalog. + if (partitionSpecs == null || parsedSourceDbName[DB_NAME] == null || sourceTableName == null + || parsedDestDbName[DB_NAME] == null || destTableName == null) { throw new MetaException("The DB and table name for the source and destination tables," + " and the partition specs must not be null."); } + if (!parsedDestDbName[CAT_NAME].equals(parsedSourceDbName[CAT_NAME])) { + throw new MetaException("You cannot move a partition across catalogs"); + } + boolean success = false; boolean pathCreated = false; RawStore ms = getMS(); ms.openTransaction(); - Table destinationTable = ms.getTable(destDbName, destTableName); + + Table destinationTable = + ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName); if (destinationTable == null) { - throw new MetaException( - "The destination table " + destDbName + "." 
+ destTableName + " not found"); + throw new MetaException( "The destination table " + + getCatalogQualifiedTableName(parsedDestDbName[CAT_NAME], + parsedDestDbName[DB_NAME], destTableName) + " not found"); } - Table sourceTable = ms.getTable(sourceDbName, sourceTableName); + Table sourceTable = + ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName); if (sourceTable == null) { - throw new MetaException( - "The source table " + sourceDbName + "." + sourceTableName + " not found"); + throw new MetaException("The source table " + + getCatalogQualifiedTableName(parsedSourceDbName[CAT_NAME], + parsedSourceDbName[DB_NAME], sourceTableName) + " not found"); } List partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(), partitionSpecs); @@ -3487,6 +3797,7 @@ public Partition exchange_partition(Map partitionSpecs, } i++; } + // Passed the unparsed DB name here, as get_partitions_ps expects to parse it List partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName, partVals, (short)-1); if (partitionsToExchange == null || partitionsToExchange.isEmpty()) { @@ -3512,8 +3823,8 @@ public Partition exchange_partition(Map partitionSpecs, Lists.newArrayListWithCapacity(partitionsToExchange.size()); // Check if any of the partitions already exists in destTable. - List destPartitionNames = - ms.listPartitionNames(destDbName, destTableName, (short) -1); + List destPartitionNames = ms.listPartitionNames(parsedDestDbName[CAT_NAME], + parsedDestDbName[DB_NAME], destTableName, (short) -1); if (destPartitionNames != null && !destPartitionNames.isEmpty()) { for (Partition partition : partitionsToExchange) { String partToExchangeName = @@ -3528,14 +3839,14 @@ public Partition exchange_partition(Map partitionSpecs, try { for (Partition partition: partitionsToExchange) { Partition destPartition = new Partition(partition); - destPartition.setDbName(destDbName); + destPartition.setDbName(parsedDestDbName[DB_NAME]); destPartition.setTableName(destinationTable.getTableName()); Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); destPartition.getSd().setLocation(destPartitionPath.toString()); ms.addPartition(destPartition); destPartitions.add(destPartition); - ms.dropPartition(partition.getDbName(), sourceTable.getTableName(), + ms.dropPartition(parsedSourceDbName[CAT_NAME], partition.getDbName(), sourceTable.getTableName(), partition.getValues()); } Path destParentPath = destPath.getParent(); @@ -3607,9 +3918,10 @@ public Partition exchange_partition(Map partitionSpecs, } } - private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name, - List part_vals, final boolean deleteData, final EnvironmentContext envContext) - throws MetaException, NoSuchObjectException, IOException, InvalidObjectException, + private boolean drop_partition_common(RawStore ms, String catName, String db_name, + String tbl_name, List part_vals, + final boolean deleteData, final EnvironmentContext envContext) + throws MetaException, NoSuchObjectException, IOException, InvalidObjectException, InvalidInputException { boolean success = false; Path partPath = null; @@ -3633,8 +3945,8 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na try { ms.openTransaction(); - part = ms.getPartition(db_name, tbl_name, part_vals); - tbl = get_table_core(db_name, tbl_name); + part = ms.getPartition(catName, db_name, tbl_name, part_vals); + tbl = 
get_table_core(catName, db_name, tbl_name); isExternalTbl = isExternal(tbl); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); mustPurge = isMustPurge(envContext, tbl); @@ -3655,7 +3967,7 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na verifyIsWritablePath(partPath); } - if (!ms.dropPartition(db_name, tbl_name, part_vals)) { + if (!ms.dropPartition(catName, db_name, tbl_name, part_vals)) { throw new MetaException("Unable to drop partition"); } else { if (!transactionalListeners.isEmpty()) { @@ -3746,6 +4058,7 @@ public DropPartitionsResult drop_partitions_req( DropPartitionsRequest request) throws TException { RawStore ms = getMS(); String dbName = request.getDbName(), tblName = request.getTblName(); + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); boolean ifExists = request.isSetIfExists() && request.isIfExists(); boolean deleteData = request.isSetDeleteData() && request.isDeleteData(); boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection(); @@ -3765,7 +4078,7 @@ public DropPartitionsResult drop_partitions_req( try { // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. - tbl = get_table_core(dbName, tblName); + tbl = get_table_core(catName, dbName, tblName); isExternal(tbl); mustPurge = isMustPurge(envContext, tbl); int minCount = 0; @@ -3778,7 +4091,7 @@ public DropPartitionsResult drop_partitions_req( ++minCount; // At least one partition per expression, if not ifExists List result = new ArrayList<>(); boolean hasUnknown = ms.getPartitionsByExpr( - dbName, tblName, expr.getExpr(), null, (short)-1, result); + catName, dbName, tblName, expr.getExpr(), null, (short)-1, result); if (hasUnknown) { // Expr is built by DDLSA, it should only contain part cols and simple ops throw new MetaException("Unexpected unknown partitions to drop"); @@ -3799,7 +4112,7 @@ public DropPartitionsResult drop_partitions_req( } else if (spec.isSetNames()) { partNames = spec.getNames(); minCount = partNames.size(); - parts = ms.getPartitionsByNames(dbName, tblName, partNames); + parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames); } else { throw new MetaException("Partition spec is not set"); } @@ -3840,7 +4153,7 @@ public DropPartitionsResult drop_partitions_req( } } - ms.dropPartitions(dbName, tblName, partNames); + ms.dropPartitions(catName, dbName, tblName, partNames); if (parts != null && !transactionalListeners.isEmpty()) { for (Partition part : parts) { transactionalListenerResponses.add( @@ -3918,13 +4231,16 @@ public boolean drop_partition_with_environment_context(final String db_name, final String tbl_name, final List part_vals, final boolean deleteData, final EnvironmentContext envContext) throws TException { - startPartitionFunction("drop_partition", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("drop_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); LOG.info("Partition values:" + part_vals); boolean ret = false; Exception ex = null; try { - ret = drop_partition_common(getMS(), db_name, tbl_name, part_vals, deleteData, envContext); + ret = drop_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, deleteData, envContext); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); 
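Almost every handler in this region now opens with parseDbName(db_name, conf) and indexes the result with CAT_NAME and DB_NAME. A rough sketch of that contract, under two stated assumptions: the marker characters used to pack a catalog into the legacy single-string name ('@' and '#' below) are illustrative rather than quoted from MetaStoreUtils, and the Configuration argument is reduced to a plain default-catalog string:

    class DbNameParsingSketch {
      static final int CAT_NAME = 0;
      static final int DB_NAME = 1;

      // Inverse of parseDbName, as used by checkLimitNumberOfPartitionsByFilter:
      // pack a catalog and a database back into one legacy-shaped string.
      static String prependCatalogToDbName(String catName, String dbName) {
        return "@" + catName + "#" + dbName;
      }

      // Split a possibly catalog-qualified name into {catalog, database}.
      // Plain names from old clients resolve to the default catalog, so the
      // CAT_NAME slot is never null -- the property the exchange_partitions
      // hunk relies on when it skips the null check for catalogs.
      static String[] parseDbName(String dbName, String defaultCatalog) {
        if (dbName != null && dbName.startsWith("@")) {
          int sep = dbName.indexOf('#');
          return new String[] {dbName.substring(1, sep), dbName.substring(sep + 1)};
        }
        return new String[] {defaultCatalog, dbName};
      }

      public static void main(String[] args) {
        String packed = prependCatalogToDbName("spark", "web_logs");
        String[] parsed = parseDbName(packed, "hive");
        System.out.println(parsed[CAT_NAME] + " / " + parsed[DB_NAME]); // spark / web_logs
      }
    }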
@@ -3941,13 +4257,15 @@ public boolean drop_partition_with_environment_context(final String db_name, @Override public Partition get_partition(final String db_name, final String tbl_name, final List part_vals) throws MetaException, NoSuchObjectException { - startPartitionFunction("get_partition", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); Partition ret = null; Exception ex = null; try { - fireReadTablePreEvent(db_name, tbl_name); - ret = getMS().getPartition(db_name, tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); } catch (Exception e) { ex = e; throwMetaException(e); @@ -3960,19 +4278,15 @@ public Partition get_partition(final String db_name, final String tbl_name, /** * Fire a pre-event for read table operation, if there are any * pre-event listeners registered - * - * @param dbName - * @param tblName - * @throws MetaException - * @throws NoSuchObjectException */ - private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException { + private void fireReadTablePreEvent(String catName, String dbName, String tblName) + throws MetaException, NoSuchObjectException { if(preListeners.size() > 0) { // do this only if there is a pre event listener registered (avoid unnecessary // metastore api call) - Table t = getMS().getTable(dbName, tblName); + Table t = getMS().getTable(catName, dbName, tblName); if (t == null) { - throw new NoSuchObjectException(dbName + "." + tblName + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tblName) + " table not found"); } firePreEvent(new PreReadTableEvent(t, this)); @@ -3984,14 +4298,15 @@ public Partition get_partition_with_auth(final String db_name, final String tbl_name, final List part_vals, final String user_name, final List group_names) throws TException { - startPartitionFunction("get_partition_with_auth", db_name, tbl_name, - part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); Partition ret = null; Exception ex = null; try { - ret = getMS().getPartitionWithAuth(db_name, tbl_name, part_vals, - user_name, group_names); + ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, user_name, group_names); } catch (InvalidObjectException e) { ex = e; throw new NoSuchObjectException(e.getMessage()); @@ -4007,13 +4322,16 @@ public Partition get_partition_with_auth(final String db_name, @Override public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { - startTableFunction("get_partitions", db_name, tbl_name); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("get_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(db_name, tbl_name, NO_FILTER_STRING, 
max_parts); - ret = getMS().getPartitions(db_name, tbl_name, max_parts); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, NO_FILTER_STRING, max_parts); + ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + max_parts); } catch (Exception e) { ex = e; throwMetaException(e); @@ -4028,14 +4346,16 @@ public Partition get_partition_with_auth(final String db_name, public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, final List groupNames) throws TException { - startTableFunction("get_partitions_with_auth", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(dbName, tblName, NO_FILTER_STRING, maxParts); - ret = getMS().getPartitionsWithAuth(dbName, tblName, maxParts, - userName, groupNames); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, NO_FILTER_STRING, maxParts); + ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + maxParts, userName, groupNames); } catch (InvalidObjectException e) { ex = e; throw new NoSuchObjectException(e.getMessage()); @@ -4049,15 +4369,21 @@ public Partition get_partition_with_auth(final String db_name, } - private void checkLimitNumberOfPartitionsByFilter(String dbName, String tblName, String filterString, int maxParts) throws TException { + private void checkLimitNumberOfPartitionsByFilter(String catName, String dbName, + String tblName, String filterString, + int maxParts) throws TException { if (isPartitionLimitEnabled()) { - checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(dbName, tblName, filterString), maxParts); + checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(prependCatalogToDbName( + catName, dbName, conf), tblName, filterString), maxParts); } } - private void checkLimitNumberOfPartitionsByExpr(String dbName, String tblName, byte[] filterExpr, int maxParts) throws TException { + private void checkLimitNumberOfPartitionsByExpr(String catName, String dbName, String tblName, + byte[] filterExpr, int maxParts) + throws TException { if (isPartitionLimitEnabled()) { - checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(dbName, tblName, filterExpr), maxParts); + checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(catName, dbName, tblName, + filterExpr), maxParts); } } @@ -4082,15 +4408,16 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int public List get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts) throws NoSuchObjectException, MetaException { - String dbName = db_name.toLowerCase(); + String[] parsedDbName = parseDbName(db_name, conf); String tableName = tbl_name.toLowerCase(); - startTableFunction("get_partitions_pspec", dbName, tableName); + startTableFunction("get_partitions_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); List partitionSpecs = null; try { - Table table = get_table_core(dbName, tableName); - List partitions = get_partitions(dbName, tableName, (short) max_parts); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + // get_partitions will parse out the catalog and db names itself + List partitions = 
get_partitions(db_name, tableName, (short) max_parts); if (is_partition_spec_grouping_enabled(table)) { partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); @@ -4098,7 +4425,8 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int else { PartitionSpec pSpec = new PartitionSpec(); pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); - pSpec.setDbName(dbName); + pSpec.setCatName(parsedDbName[CAT_NAME]); + pSpec.setDbName(parsedDbName[DB_NAME]); pSpec.setTableName(tableName); pSpec.setRootPath(table.getSd().getLocation()); partitionSpecs = Arrays.asList(pSpec); @@ -4238,12 +4566,14 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public List get_partition_names(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { - startTableFunction("get_partition_names", db_name, tbl_name); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionNames(db_name, tbl_name, max_parts); + ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + max_parts); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4259,11 +4589,14 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String dbName = request.getDbName(); String tblName = request.getTblName(); + // This is serious black magic, as the following 2 lines do nothing AFAICT but without them + // the subsequent call to listPartitionValues fails. List partCols = new ArrayList(); partCols.add(request.getPartitionKeys().get(0)); - return getMS().listPartitionValues(dbName, tblName, request.getPartitionKeys(), + return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), request.isApplyDistinct(), request.getFilter(), request.isAscending(), request.getPartitionOrder(), request.getMaxParts()); } @@ -4280,8 +4613,9 @@ public void alter_partition_with_environment_context(final String dbName, final String tableName, final Partition newPartition, final EnvironmentContext envContext) throws TException { - rename_partition(dbName, tableName, null, - newPartition, envContext); + String[] parsedDbName = parseDbName(dbName, conf); + rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition, + envContext); } @Override @@ -4289,14 +4623,16 @@ public void rename_partition(final String db_name, final String tbl_name, final List part_vals, final Partition new_part) throws TException { // Call rename_partition without an environment context. 
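The *_by_name handlers below (get_partition_by_name, append_partition_by_name, drop_partition_by_name) all funnel through getPartValsFromName, which now takes a catalog, fetches the table, and turns a partition name into a value list ordered by the table's partition keys. A simplified sketch of that conversion, assuming unescaped names (the real Warehouse.makeSpecFromName also handles escaping):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class PartNameSketch {
      // "ds=2018-01-01/hr=3" -> {ds=2018-01-01, hr=3}
      static Map<String, String> makeSpecFromName(String partName) {
        Map<String, String> spec = new LinkedHashMap<>();
        for (String kv : partName.split("/")) {
          int eq = kv.indexOf('=');
          spec.put(kv.substring(0, eq), kv.substring(eq + 1));
        }
        return spec;
      }

      // Order the values by the table's partition keys, as getPartValsFromName
      // does after fetching the Table via (catName, dbName, tblName).
      static List<String> partValsFromName(List<String> partKeys, String partName) {
        Map<String, String> spec = makeSpecFromName(partName);
        List<String> vals = new ArrayList<>();
        for (String key : partKeys) {
          vals.add(spec.get(key));
        }
        return vals;
      }

      public static void main(String[] args) {
        System.out.println(partValsFromName(Arrays.asList("ds", "hr"), "ds=2018-01-01/hr=3"));
        // [2018-01-01, 3]
      }
    }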
- rename_partition(db_name, tbl_name, part_vals, new_part, null); + String[] parsedDbName = parseDbName(db_name, conf); + rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part, + null); } - private void rename_partition(final String db_name, final String tbl_name, + private void rename_partition(final String catName, final String db_name, final String tbl_name, final List part_vals, final Partition new_part, final EnvironmentContext envContext) throws TException { - startTableFunction("alter_partition", db_name, tbl_name); + startTableFunction("alter_partition", catName, db_name, tbl_name); if (LOG.isInfoEnabled()) { LOG.info("New partition values:" + new_part.getValues()); @@ -4314,6 +4650,9 @@ private void rename_partition(final String db_name, final String tbl_name, } } + // Make sure the new partition has the catalog value set + if (!new_part.isSetCatName()) new_part.setCatName(catName); + Partition oldPart = null; Exception ex = null; try { @@ -4323,14 +4662,14 @@ private void rename_partition(final String db_name, final String tbl_name, partitionValidationPattern); } - oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part, - envContext, this); + oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name, + part_vals, new_part, envContext, this); // Only fetch the table if we actually have a listener Table table = null; if (!listeners.isEmpty()) { if (table == null) { - table = getMS().getTable(db_name, tbl_name); + table = getMS().getTable(catName, db_name, tbl_name); } MetaStoreListenerNotifier.notifyEvent(listeners, @@ -4370,7 +4709,8 @@ public void alter_partitions_with_environment_context(final String db_name, fina final List new_parts, EnvironmentContext environmentContext) throws TException { - startTableFunction("alter_partitions", db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("alter_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); if (LOG.isInfoEnabled()) { for (Partition tmpPart : new_parts) { @@ -4383,10 +4723,10 @@ public void alter_partitions_with_environment_context(final String db_name, fina Exception ex = null; try { for (Partition tmpPart : new_parts) { - firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this)); + firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this)); } - oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts, - environmentContext, this); + oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this); Iterator olditr = oldParts.iterator(); // Only fetch the table if we have a listener that needs it. Table table = null; @@ -4400,7 +4740,7 @@ public void alter_partitions_with_environment_context(final String db_name, fina } if (table == null) { - table = getMS().getTable(db_name, tbl_name); + table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); } if (!listeners.isEmpty()) { @@ -4440,7 +4780,8 @@ public void alter_table(final String dbname, final String name, final Table newTable) throws InvalidOperationException, MetaException { // Do not set an environment context. 
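Error and log messages throughout these hunks switch from dbName + "." + tableName to getCatalogQualifiedTableName(catName, dbName, tableName). The exact string Warehouse produces is not shown in this diff, so the formatting below is an assumption for illustration; the point is that messages now carry all three name components:

    class QualifiedNameSketch {
      // Assumed formatting; the real helper lives in
      // org.apache.hadoop.hive.metastore.Warehouse.
      static String getCatalogQualifiedTableName(String catName, String dbName, String tableName) {
        return catName + "." + dbName + "." + tableName;
      }

      public static void main(String[] args) {
        System.out.println("The destination table "
            + getCatalogQualifiedTableName("hive", "sales", "orders") + " not found");
      }
    }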
- alter_table_core(dbname,name, newTable, null); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, null); } @Override @@ -4452,7 +4793,8 @@ public void alter_table_with_cascade(final String dbname, final String name, envContext = new EnvironmentContext(); envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } - alter_table_core(dbname, name, newTable, envContext); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext); } @Override @@ -4460,13 +4802,14 @@ public void alter_table_with_environment_context(final String dbname, final String name, final Table newTable, final EnvironmentContext envContext) throws InvalidOperationException, MetaException { - alter_table_core(dbname, name, newTable, envContext); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext); } - private void alter_table_core(final String dbname, final String name, final Table newTable, - final EnvironmentContext envContext) + private void alter_table_core(final String catName, final String dbname, final String name, + final Table newTable, final EnvironmentContext envContext) throws InvalidOperationException, MetaException { - startFunction("alter_table", ": db=" + dbname + " tbl=" + name + startFunction("alter_table", ": " + getCatalogQualifiedTableName(catName, dbname, name) + " newtbl=" + newTable.getTableName()); // Update the time if it hasn't been specified. if (newTable.getParameters() == null || @@ -4483,13 +4826,15 @@ private void alter_table_core(final String dbname, final String name, final Tabl newTable.getSd().setLocation(tblPath.toString()); } } + // Set the catalog name if it hasn't been set in the new table + if (!newTable.isSetCatName()) newTable.setCatName(catName); boolean success = false; Exception ex = null; try { - Table oldt = get_table_core(dbname, name); + Table oldt = get_table_core(catName, dbname, name); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); - alterHandler.alterTable(getMS(), wh, dbname, name, newTable, + alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable, envContext, this); success = true; if (!listeners.isEmpty()) { @@ -4508,8 +4853,8 @@ private void alter_table_core(final String dbname, final String name, final Tabl new CreateTableEvent(newTable, true, this), envContext); if (newTable.getPartitionKeysSize() != 0) { - List partitions - = getMS().getPartitions(newTable.getDbName(), newTable.getTableName(), -1); + List partitions = getMS().getPartitions(catName, + newTable.getDbName(), newTable.getTableName(), -1); MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, new AddPartitionEvent(newTable, partitions, true, this), @@ -4542,8 +4887,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getTables(dbname, pattern); + ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4564,8 +4910,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getTables(dbname, pattern, 
TableType.valueOf(tableType)); + ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, TableType.valueOf(tableType)); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4586,8 +4933,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getMaterializedViewsForRewriting(dbname); + ret = getMS().getMaterializedViewsForRewriting(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4607,8 +4955,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getAllTables(dbname); + ret = getMS().getAllTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4635,6 +4984,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl startFunction("get_fields_with_environment_context", ": db=" + db + "tbl=" + tableName); String[] names = tableName.split("\\."); String base_table_name = names[0]; + String[] parsedDbName = parseDbName(db, conf); Table tbl; List ret = null; @@ -4642,7 +4992,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl ClassLoader orgHiveLoader = null; try { try { - tbl = get_table_core(db, base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } @@ -4735,13 +5085,15 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException { try { String[] names = tableName.split("\\."); String base_table_name = names[0]; + String[] parsedDbName = parseDbName(db, conf); Table tbl; try { - tbl = get_table_core(db, base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } + // Pass unparsed db name here List fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext); if (tbl == null || fieldSchemas == null) { @@ -4846,9 +5198,10 @@ public String get_config_value(String name, String defaultValue) return partVals; } - private List getPartValsFromName(RawStore ms, String dbName, String tblName, - String partName) throws MetaException, InvalidObjectException { - Table t = ms.getTable(dbName, tblName); + private List getPartValsFromName(RawStore ms, String catName, String dbName, + String tblName, String partName) + throws MetaException, InvalidObjectException { + Table t = ms.getTable(catName, dbName, tblName); if (t == null) { throw new InvalidObjectException(dbName + "." 
+ tblName + " table not found"); @@ -4856,20 +5209,20 @@ public String get_config_value(String name, String defaultValue) return getPartValsFromName(t, partName); } - private Partition get_partition_by_name_core(final RawStore ms, final String db_name, - final String tbl_name, final String part_name) - throws TException { - fireReadTablePreEvent(db_name, tbl_name); + private Partition get_partition_by_name_core(final RawStore ms, final String catName, + final String db_name, final String tbl_name, + final String part_name) throws TException { + fireReadTablePreEvent(catName, db_name, tbl_name); List partVals; try { - partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { throw new NoSuchObjectException(e.getMessage()); } - Partition p = ms.getPartition(db_name, tbl_name, partVals); + Partition p = ms.getPartition(catName, db_name, tbl_name, partVals); if (p == null) { - throw new NoSuchObjectException(db_name + "." + tbl_name + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, db_name, tbl_name) + " partition (" + part_name + ") not found"); } return p; @@ -4879,13 +5232,15 @@ private Partition get_partition_by_name_core(final RawStore ms, final String db_ public Partition get_partition_by_name(final String db_name, final String tbl_name, final String part_name) throws TException { - startFunction("get_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("get_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) + + " part=" + part_name); Partition ret = null; Exception ex = null; try { - ret = get_partition_by_name_core(getMS(), db_name, tbl_name, part_name); - } catch (Exception e) { + ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_name); } catch (Exception e) { ex = e; rethrowException(e); } finally { @@ -4904,15 +5259,17 @@ public Partition append_partition_by_name(final String db_name, final String tbl public Partition append_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final EnvironmentContext env_context) throws TException { - startFunction("append_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("append_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name) + " part=" + part_name); Partition ret = null; Exception ex = null; try { RawStore ms = getMS(); - List partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); - ret = append_partition_common(ms, db_name, tbl_name, partVals, env_context); + List partVals = getPartValsFromName(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_name); + ret = append_partition_common(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, partVals, env_context); } catch (Exception e) { ex = e; if (e instanceof InvalidObjectException) { @@ -4930,18 +5287,20 @@ public Partition append_partition_by_name_with_environment_context(final String return ret; } - private boolean drop_partition_by_name_core(final RawStore ms, final String db_name, - final String tbl_name, final String part_name, final boolean deleteData, - final 
EnvironmentContext envContext) throws TException, IOException { + private boolean drop_partition_by_name_core(final RawStore ms, final String catName, + final String db_name, final String tbl_name, + final String part_name, final boolean deleteData, + final EnvironmentContext envContext) + throws TException, IOException { List partVals; try { - partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { throw new NoSuchObjectException(e.getMessage()); } - return drop_partition_common(ms, db_name, tbl_name, partVals, deleteData, envContext); + return drop_partition_common(ms, catName, db_name, tbl_name, partVals, deleteData, envContext); } @Override @@ -4955,14 +5314,16 @@ public boolean drop_partition_by_name(final String db_name, final String tbl_nam public boolean drop_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final boolean deleteData, final EnvironmentContext envContext) throws TException { - startFunction("drop_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("drop_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) + + " part=" + part_name); boolean ret = false; Exception ex = null; try { - ret = drop_partition_by_name_core(getMS(), db_name, tbl_name, - part_name, deleteData, envContext); + ret = drop_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_name, deleteData, envContext); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); @@ -4980,11 +5341,14 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { - startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); List ret = null; Exception ex = null; try { + // Don't send the parsedDbName, as this method will parse itself. 
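The comment immediately above ("Don't send the parsedDbName, as this method will parse itself"), like the earlier ones in get_partitions_pspec and exchange_partitions, marks a real trap: parsing is lossy in one direction, so a handler that delegates to another public handler must forward the original string. Re-parsing an already-extracted DB_NAME silently lands in the default catalog. A sketch of the pitfall, reusing the assumed parseDbName from the earlier sketch:

    class UnparsedNameForwardingSketch {
      public static void main(String[] args) {
        String original = "@spark#web_logs"; // catalog-qualified client input
        String[] parsed = DbNameParsingSketch.parseDbName(original, "hive");
        System.out.println(parsed[0] + "." + parsed[1]); // spark.web_logs

        // Forwarding the bare DB_NAME to a method that re-parses would lose
        // the catalog: "web_logs" has no marker, so it falls back to "hive".
        String[] reparsed = DbNameParsingSketch.parseDbName(parsed[1], "hive");
        System.out.println(reparsed[0] + "." + reparsed[1]); // hive.web_logs
      }
    }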
ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, null, null); } catch (Exception e) { @@ -5002,14 +5366,15 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n final String tbl_name, final List part_vals, final short max_parts, final String userName, final List groupNames) throws TException { - startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name, - part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_ps_with_auth", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, - userName, groupNames); + ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, max_parts, userName, groupNames); } catch (InvalidObjectException e) { ex = e; throw new MetaException(e.getMessage()); @@ -5026,12 +5391,15 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partition_names_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { - startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts); + ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + part_vals, max_parts); } catch (Exception e) { ex = e; rethrowException(e); @@ -5060,7 +5428,6 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n return Warehouse.makeSpecFromName(part_name); } - private String lowerCaseConvertPartName(String partName) throws MetaException { boolean isFirst = true; Map partSpec = Warehouse.makeEscSpecFromName(partName); @@ -5083,15 +5450,18 @@ private String lowerCaseConvertPartName(String partName) throws MetaException { @Override public ColumnStatistics get_table_column_statistics(String dbName, String tableName, String colName) throws TException { - dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); + parsedDbName[CAT_NAME] = parsedDbName[CAT_NAME].toLowerCase(); + parsedDbName[DB_NAME] = parsedDbName[DB_NAME].toLowerCase(); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); - startFunction("get_column_statistics_by_table", ": db=" + dbName + " table=" + tableName + - " column=" + colName); + startFunction("get_column_statistics_by_table", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tableName) + " column=" + colName); ColumnStatistics statsObj = null; try { statsObj = getMS().getTableColumnStatistics( - dbName, tableName, Lists.newArrayList(colName)); + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName)); if (statsObj != null) { assert statsObj.getStatsObjSize() <= 1; } @@ -5103,16 +5473,19 @@ public ColumnStatistics 
get_table_column_statistics(String dbName, String tableN @Override public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName().toLowerCase() : + getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_table_statistics_req", ": db=" + dbName + " table=" + tblName); + startFunction("get_table_statistics_req", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); TableStatsResult result = null; List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { lowerCaseColNames.add(colName.toLowerCase()); } try { - ColumnStatistics cs = getMS().getTableColumnStatistics(dbName, tblName, lowerCaseColNames); + ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames); result = new TableStatsResult((cs == null || cs.getStatsObj() == null) ? Lists.newArrayList() : cs.getStatsObj()); } finally { @@ -5125,16 +5498,18 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro public ColumnStatistics get_partition_column_statistics(String dbName, String tableName, String partName, String colName) throws TException { dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); String convertedPartName = lowerCaseConvertPartName(partName); - startFunction("get_column_statistics_by_partition", - ": db=" + dbName + " table=" + tableName - + " partition=" + convertedPartName + " column=" + colName); + startFunction("get_column_statistics_by_partition", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tableName) + " partition=" + convertedPartName + " column=" + colName); ColumnStatistics statsObj = null; try { - List list = getMS().getPartitionColumnStatistics(dbName, tableName, + List list = getMS().getPartitionColumnStatistics( + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(convertedPartName), Lists.newArrayList(colName)); if (list.isEmpty()) { return null; @@ -5152,9 +5527,11 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta @Override public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request) throws TException { + String catName = request.isSetCatName() ? 
request.getCatName().toLowerCase() : getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_partitions_statistics_req", ": db=" + dbName + " table=" + tblName); + startFunction("get_partitions_statistics_req", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); PartitionsStatsResult result = null; List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); @@ -5167,7 +5544,7 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques } try { List stats = getMS().getPartitionColumnStatistics( - dbName, tblName, lowerCasePartNames, lowerCaseColNames); + catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames); Map> map = new HashMap<>(); for (ColumnStatistics stat : stats) { map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj()); @@ -5181,13 +5558,16 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques @Override public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException { + String catName; String dbName; String tableName; String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + catName = statsDesc.isSetCatName() ? statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf); dbName = statsDesc.getDbName().toLowerCase(); tableName = statsDesc.getTableName().toLowerCase(); + statsDesc.setCatName(catName); statsDesc.setDbName(dbName); statsDesc.setTableName(tableName); long time = System.currentTimeMillis() / 1000; @@ -5195,8 +5575,8 @@ public boolean update_table_column_statistics(ColumnStatistics colStats) throws List statsObjs = colStats.getStatsObj(); - startFunction("write_column_statistics", ": db=" + dbName - + " table=" + tableName); + startFunction("write_column_statistics", ": table=" + + Warehouse.getCatalogQualifiedTableName(catName, dbName, tableName)); for (ColumnStatisticsObj statsObj:statsObjs) { colName = statsObj.getColName().toLowerCase(); statsObj.setColName(colName); @@ -5218,16 +5598,19 @@ public boolean update_table_column_statistics(ColumnStatistics colStats) throws private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + String catName; String dbName; String tableName; String partName; String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + catName = statsDesc.isSetCatName() ? 
statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf); dbName = statsDesc.getDbName().toLowerCase(); tableName = statsDesc.getTableName().toLowerCase(); partName = lowerCaseConvertPartName(statsDesc.getPartName()); + statsDesc.setCatName(catName); statsDesc.setDbName(dbName); statsDesc.setTableName(tableName); statsDesc.setPartName(partName); @@ -5253,7 +5636,7 @@ private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) try { if (tbl == null) { - tbl = getTable(dbName, tableName); + tbl = getTable(catName, dbName, tableName); } List partVals = getPartValsFromName(tbl, partName); ret = getMS().updatePartitionColumnStatistics(colStats, partVals); @@ -5272,19 +5655,20 @@ public boolean update_partition_column_statistics(ColumnStatistics colStats) thr public boolean delete_partition_column_statistics(String dbName, String tableName, String partName, String colName) throws TException { dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); if (colName != null) { colName = colName.toLowerCase(); } String convertedPartName = lowerCaseConvertPartName(partName); - startFunction("delete_column_statistics_by_partition",": db=" + dbName - + " table=" + tableName + " partition=" + convertedPartName - + " column=" + colName); + startFunction("delete_column_statistics_by_partition",": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) + + " partition=" + convertedPartName + " column=" + colName); boolean ret = false; try { - List partVals = getPartValsFromName(getMS(), dbName, tableName, convertedPartName); - ret = getMS().deletePartitionColumnStatistics(dbName, tableName, + List partVals = getPartValsFromName(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName); + ret = getMS().deletePartitionColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName, partVals, colName); } finally { endFunction("delete_column_statistics_by_partition", ret != false, null, tableName); @@ -5298,15 +5682,18 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); + if (colName != null) { colName = colName.toLowerCase(); } - startFunction("delete_column_statistics_by_table", ": db=" + dbName - + " table=" + tableName + " column=" + colName); + startFunction("delete_column_statistics_by_table", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) + " column=" + + colName); boolean ret = false; try { - ret = getMS().deleteTableColumnStatistics(dbName, tableName, colName); + ret = getMS().deleteTableColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, colName); } finally { endFunction("delete_column_statistics_by_table", ret != false, null, tableName); } @@ -5317,13 +5704,17 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S public List get_partitions_by_filter(final String dbName, final String tblName, final String filter, final short maxParts) throws TException { - startTableFunction("get_partitions_by_filter", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_filter", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName); + 
fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(dbName, tblName, filter, maxParts); - ret = getMS().getPartitionsByFilter(dbName, tblName, filter, maxParts); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, filter, maxParts); + ret = getMS().getPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + filter, maxParts); } catch (Exception e) { ex = e; rethrowException(e); @@ -5338,11 +5729,13 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S final String filter, final int maxParts) throws TException { - startTableFunction("get_partitions_by_filter_pspec", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_filter_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List partitionSpecs = null; try { - Table table = get_table_core(dbName, tblName); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + // Don't pass the parsed db name, as get_partitions_by_filter will parse it itself List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); if (is_partition_spec_grouping_enabled(table)) { @@ -5352,7 +5745,8 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S PartitionSpec pSpec = new PartitionSpec(); pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); pSpec.setRootPath(table.getSd().getLocation()); - pSpec.setDbName(dbName); + pSpec.setCatName(parsedDbName[CAT_NAME]); + pSpec.setDbName(parsedDbName[DB_NAME]); pSpec.setTableName(tblName); partitionSpecs = Arrays.asList(pSpec); } @@ -5368,14 +5762,15 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S public PartitionsByExprResult get_partitions_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); - startTableFunction("get_partitions_by_expr", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + startTableFunction("get_partitions_by_expr", catName, dbName, tblName); + fireReadTablePreEvent(catName, dbName, tblName); PartitionsByExprResult ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByExpr(dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); + checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); List partitions = new LinkedList<>(); - boolean hasUnknownPartitions = getMS().getPartitionsByExpr(dbName, tblName, + boolean hasUnknownPartitions = getMS().getPartitionsByExpr(catName, dbName, tblName, req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions); ret = new PartitionsByExprResult(partitions, hasUnknownPartitions); } catch (Exception e) { @@ -5404,12 +5799,15 @@ private void rethrowException(Exception e) throws TException { public int get_num_partitions_by_filter(final String dbName, final String tblName, final String filter) throws TException { - startTableFunction("get_num_partitions_by_filter", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_num_partitions_by_filter", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tblName); int ret = -1; Exception ex = null; try { - ret = getMS().getNumPartitionsByFilter(dbName, tblName, filter); + ret = getMS().getNumPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, filter); } catch (Exception e) { ex = e; rethrowException(e); @@ -5419,15 +5817,13 @@ public int get_num_partitions_by_filter(final String dbName, return ret; } - int get_num_partitions_by_expr(final String dbName, - final String tblName, final byte[] expr) + private int get_num_partitions_by_expr(final String catName, final String dbName, + final String tblName, final byte[] expr) throws TException { - startTableFunction("get_num_partitions_by_expr", dbName, tblName); - int ret = -1; Exception ex = null; try { - ret = getMS().getNumPartitionsByExpr(dbName, tblName, expr); + ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr); } catch (Exception e) { ex = e; rethrowException(e); @@ -5441,12 +5837,15 @@ int get_num_partitions_by_expr(final String dbName, public List get_partitions_by_names(final String dbName, final String tblName, final List partNames) throws TException { - startTableFunction("get_partitions_by_names", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - ret = getMS().getPartitionsByNames(dbName, tblName, partNames); + ret = getMS().getPartitionsByNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + partNames); } catch (Exception e) { ex = e; rethrowException(e); @@ -5460,20 +5859,21 @@ int get_num_partitions_by_expr(final String dbName, public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List groupNames) throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); + String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { String partName = getPartName(hiveObject); - return this.get_column_privilege_set(hiveObject.getDbName(), hiveObject + return this.get_column_privilege_set(catName, hiveObject.getDbName(), hiveObject .getObjectName(), partName, hiveObject.getColumnName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { String partName = getPartName(hiveObject); - return this.get_partition_privilege_set(hiveObject.getDbName(), + return this.get_partition_privilege_set(catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName, userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - return this.get_db_privilege_set(hiveObject.getDbName(), userName, + return this.get_db_privilege_set(catName, hiveObject.getDbName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - return this.get_table_privilege_set(hiveObject.getDbName(), hiveObject + return this.get_table_privilege_set(catName, hiveObject.getDbName(), hiveObject .getObjectName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { return this.get_user_privilege_set(userName, groupNames); @@ -5486,7 +5886,9 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { List partValue = hiveObject.getPartValues(); if (partValue != null && partValue.size() > 0) { try { - Table table = get_table_core(hiveObject.getDbName(), hiveObject + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); + Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject .getObjectName()); partName = Warehouse .makePartName(table.getPartitionKeys(), partValue); @@ -5497,7 +5899,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { return partName; } - private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_column_privilege_set(String catName, final String dbName, final String tableName, final String partName, final String columnName, final String userName, final List groupNames) throws TException { incrementCounter("get_column_privilege_set"); @@ -5505,7 +5907,7 @@ private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, PrincipalPrivilegeSet ret; try { ret = getMS().getColumnPrivilegeSet( - dbName, tableName, partName, columnName, userName, groupNames); + catName, dbName, tableName, partName, columnName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5514,13 +5916,13 @@ private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, return ret; } - private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_db_privilege_set(String catName, final String dbName, final String userName, final List groupNames) throws TException { incrementCounter("get_db_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getDBPrivilegeSet(dbName, userName, groupNames); + ret = getMS().getDBPrivilegeSet(catName, dbName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5530,14 +5932,14 @@ private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, } private PrincipalPrivilegeSet get_partition_privilege_set( - final String dbName, final String tableName, final String partName, + String catName, 
final String dbName, final String tableName, final String partName, final String userName, final List groupNames) throws TException { incrementCounter("get_partition_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getPartitionPrivilegeSet(dbName, tableName, partName, + ret = getMS().getPartitionPrivilegeSet(catName, dbName, tableName, partName, userName, groupNames); } catch (MetaException e) { throw e; @@ -5547,14 +5949,14 @@ private PrincipalPrivilegeSet get_partition_privilege_set( return ret; } - private PrincipalPrivilegeSet get_table_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_table_privilege_set(String catName, final String dbName, final String tableName, final String userName, final List groupNames) throws TException { incrementCounter("get_table_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getTablePrivilegeSet(dbName, tableName, userName, + ret = getMS().getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } catch (MetaException e) { throw e; @@ -5813,52 +6215,53 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, PrincipalType principalType, HiveObjectRef hiveObject) throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : getDefaultCatalog(conf); if (hiveObject.getObjectType() == null) { - return getAllPrivileges(principalName, principalType); + return getAllPrivileges(principalName, principalType, catName); } if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { return list_global_privileges(principalName, principalType); } if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - return list_db_privileges(principalName, principalType, hiveObject + return list_db_privileges(principalName, principalType, catName, hiveObject .getDbName()); } if (hiveObject.getObjectType() == HiveObjectType.TABLE) { return list_table_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); } if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { return list_partition_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject .getPartValues()); } if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { if (hiveObject.getPartValues() == null || hiveObject.getPartValues().isEmpty()) { return list_table_column_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); } return list_partition_column_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject .getPartValues(), hiveObject.getColumnName()); } return null; } private List getAllPrivileges(String principalName, - PrincipalType principalType) throws TException { + PrincipalType principalType, String catName) throws TException { List privs = new ArrayList<>(); privs.addAll(list_global_privileges(principalName, principalType)); - privs.addAll(list_db_privileges(principalName, principalType, null)); - privs.addAll(list_table_privileges(principalName, principalType, null, null)); - privs.addAll(list_partition_privileges(principalName, 
principalType, null, null, null)); - privs.addAll(list_table_column_privileges(principalName, principalType, null, null, null)); + privs.addAll(list_db_privileges(principalName, principalType, catName, null)); + privs.addAll(list_table_privileges(principalName, principalType, catName, null, null)); + privs.addAll(list_partition_privileges(principalName, principalType, catName, null, null, null)); + privs.addAll(list_table_column_privileges(principalName, principalType, catName, null, null, null)); privs.addAll(list_partition_column_privileges(principalName, principalType, - null, null, null, null)); + catName, null, null, null, null)); return privs; } private List list_table_column_privileges( - final String principalName, final PrincipalType principalType, + final String principalName, final PrincipalType principalType, String catName, final String dbName, final String tableName, final String columnName) throws TException { incrementCounter("list_table_column_privileges"); @@ -5867,10 +6270,10 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalTableColumnGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listTableColumnGrantsAll(dbName, tableName, columnName); + return getMS().listTableColumnGrantsAll(catName, dbName, tableName, columnName); } return getMS().listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5880,7 +6283,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_column_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName, final List partValues, + String catName, final String dbName, final String tableName, final List partValues, final String columnName) throws TException { incrementCounter("list_partition_column_privileges"); @@ -5888,13 +6291,13 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); } - Table tbl = get_table_core(dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { - return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); + return getMS().listPartitionColumnGrantsAll(catName, dbName, tableName, partName, columnName); } - return getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, + return getMS().listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName); } catch (MetaException e) { throw e; @@ -5904,7 +6307,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } private List list_db_privileges(final String principalName, - final PrincipalType principalType, final String dbName) throws TException { + final PrincipalType principalType, String catName, final String dbName) throws TException { incrementCounter("list_security_db_grant"); try { @@ -5912,9 +6315,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalDBGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listDBGrantsAll(dbName); + return 
getMS().listDBGrantsAll(catName, dbName); } else { - return getMS().listPrincipalDBGrants(principalName, principalType, dbName); + return getMS().listPrincipalDBGrants(principalName, principalType, catName, dbName); } } catch (MetaException e) { throw e; @@ -5925,7 +6328,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName, final List partValues) + String catName, final String dbName, final String tableName, final List partValues) throws TException { incrementCounter("list_security_partition_grant"); @@ -5933,13 +6336,13 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); } - Table tbl = get_table_core(dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { - return getMS().listPartitionGrantsAll(dbName, tableName, partName); + return getMS().listPartitionGrantsAll(catName, dbName, tableName, partName); } return getMS().listPrincipalPartitionGrants( - principalName, principalType, dbName, tableName, partValues, partName); + principalName, principalType, catName, dbName, tableName, partValues, partName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5949,7 +6352,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_table_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName) throws TException { + String catName, final String dbName, final String tableName) throws TException { incrementCounter("list_security_table_grant"); try { @@ -5957,9 +6360,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalTableGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listTableGrantsAll(dbName, tableName); + return getMS().listTableGrantsAll(catName, dbName, tableName); } - return getMS().listAllTableGrants(principalName, principalType, dbName, tableName); + return getMS().listAllTableGrants(principalName, principalType, catName, dbName, tableName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6211,10 +6614,14 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, RawStore ms = getMS(); boolean success = false; try { + String[] parsedDbName = parseDbName(db_name, conf); ms.openTransaction(); - startPartitionFunction("markPartitionForEvent", db_name, tbl_name, partName); - firePreEvent(new PreLoadPartitionDoneEvent(db_name, tbl_name, partName, this)); - tbl = ms.markPartitionForEvent(db_name, tbl_name, partName, evtType); + startPartitionFunction("markPartitionForEvent", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName); + firePreEvent(new PreLoadPartitionDoneEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName, this)); + tbl = ms.markPartitionForEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + partName, evtType); if (null == tbl) { throw new UnknownTableException("Table: " + tbl_name + " not found."); } @@ -6257,11 +6664,14 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, public boolean isPartitionMarkedForEvent(final String 
db_name, final String tbl_name, final Map partName, final PartitionEventType evtType) throws TException { - startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("isPartitionMarkedForEvent", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName); Boolean ret = null; Exception ex = null; try { - ret = getMS().isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType); + ret = getMS().isPartitionMarkedForEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName, evtType); } catch (Exception original) { LOG.error("Exception caught for isPartitionMarkedForEvent ",original); ex = original; @@ -6341,13 +6751,14 @@ public void create_function(Function func) throws TException { RawStore ms = getMS(); Map transactionalListenerResponses = Collections.emptyMap(); try { + String catName = func.isSetCatName() ? func.getCatName() : getDefaultCatalog(conf); ms.openTransaction(); - Database db = ms.getDatabase(func.getDbName()); + Database db = ms.getDatabase(catName, func.getDbName()); if (db == null) { throw new NoSuchObjectException("The database " + func.getDbName() + " does not exist"); } - Function existingFunc = ms.getFunction(func.getDbName(), func.getFunctionName()); + Function existingFunc = ms.getFunction(catName, func.getDbName(), func.getFunctionName()); if (existingFunc != null) { throw new AlreadyExistsException( "Function " + func.getFunctionName() + " already exists"); @@ -6387,9 +6798,10 @@ public void drop_function(String dbName, String funcName) Function func = null; RawStore ms = getMS(); Map transactionalListenerResponses = Collections.emptyMap(); + String[] parsedDbName = parseDbName(dbName, conf); try { ms.openTransaction(); - func = ms.getFunction(dbName, funcName); + func = ms.getFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (func == null) { throw new NoSuchObjectException("Function " + funcName + " does not exist"); } @@ -6407,7 +6819,7 @@ public void drop_function(String dbName, String funcName) // if the operation on metastore fails, we don't do anything in change management, but fail // the metastore transaction, as having a copy of the jar in change management is not going // to cause any problem, the cleaner thread will remove this when this jar expires. 
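The function hunks here show the patch's two catalog-resolution idioms side by side: Thrift structs that gained an optional catName (Function above, the various request objects elsewhere) fall back to getDefaultCatalog(conf) when the field is unset, while the legacy string-only signatures (drop_function, alter_function, getFunctions, get_function below) first pass the incoming db name through parseDbName, which splits a possibly catalog-qualified name into a String[] indexed by CAT_NAME and DB_NAME. A minimal sketch of both idioms follows; it is illustrative rather than part of the commit, and it assumes parseDbName and the two index constants live in MetaStoreUtils alongside getDefaultCatalog, as the static imports added to HiveMetaStoreClient later in this diff suggest.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.Function;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.*;

    // Hypothetical illustration of the two idioms; not a class from this patch.
    class CatalogIdiomSketch {
      private final Configuration conf = new Configuration();

      // Idiom 1: the object carries an optional catName; default it when unset.
      String catalogOf(Function func) {
        return func.isSetCatName() ? func.getCatName() : getDefaultCatalog(conf);
      }

      // Idiom 2: legacy string API; the db name may arrive catalog-qualified
      // (the client side encodes it with prependCatalogToDbName), so split it
      // back out before touching the RawStore.
      String[] catalogAndDb(String dbName) throws MetaException {
        String[] parsed = parseDbName(dbName, conf);
        return parsed; // parsed[CAT_NAME] is the catalog, parsed[DB_NAME] the database
      }
    }

The string encoding exists so that the existing Thrift method signatures, which carry only a db name, can stay wire-compatible while still addressing non-default catalogs.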
- ms.dropFunction(dbName, funcName); + ms.dropFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (transactionalListeners.size() > 0) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, @@ -6435,9 +6847,10 @@ public void alter_function(String dbName, String funcName, Function newFunc) thr validateFunctionInfo(newFunc); boolean success = false; RawStore ms = getMS(); + String[] parsedDbName = parseDbName(dbName, conf); try { ms.openTransaction(); - ms.alterFunction(dbName, funcName, newFunc); + ms.alterFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName, newFunc); success = ms.commitTransaction(); } finally { if (!success) { @@ -6454,9 +6867,10 @@ public void alter_function(String dbName, String funcName, Function newFunc) thr RawStore ms = getMS(); Exception ex = null; List funcNames = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - funcNames = ms.getFunctions(dbName, pattern); + funcNames = ms.getFunctions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -6476,7 +6890,10 @@ public GetAllFunctionsResponse get_all_functions() List allFunctions = null; Exception ex = null; try { - allFunctions = ms.getAllFunctions(); + // Leaving this as the 'hive' catalog (rather than choosing the default from the + // configuration) because all the default UDFs are in that catalog, and I think that's + // what people really want here. + allFunctions = ms.getAllFunctions(DEFAULT_CATALOG_NAME); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -6494,9 +6911,10 @@ public Function get_function(String dbName, String funcName) throws TException { RawStore ms = getMS(); Function func = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - func = ms.getFunction(dbName, funcName); + func = ms.getFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (func == null) { throw new NoSuchObjectException( "Function " + dbName + "." + funcName + " does not exist"); @@ -6658,10 +7076,12 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( @Override public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TException { + String catName = request.isSetCatName() ?
request.getCatName().toLowerCase() : + getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_aggr_stats_for", ": db=" + request.getDbName() - + " table=" + request.getTblName()); + startFunction("get_aggr_stats_for", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { @@ -6674,8 +7094,8 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce AggrStats aggrStats = null; try { - aggrStats = new AggrStats(getMS().get_aggr_stats_for(dbName, tblName, lowerCasePartNames, - lowerCaseColNames)); + aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName, + lowerCasePartNames, lowerCaseColNames)); return aggrStats; } finally { endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); @@ -6693,6 +7113,7 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc // figure out if it is table level or partition level ColumnStatistics firstColStats = csNews.get(0); ColumnStatisticsDesc statsDesc = firstColStats.getStatsDesc(); + String catName = statsDesc.isSetCatName() ? statsDesc.getCatName() : getDefaultCatalog(conf); String dbName = statsDesc.getDbName(); String tableName = statsDesc.getTableName(); List colNames = new ArrayList<>(); @@ -6708,8 +7129,8 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc } else { if (request.isSetNeedMerge() && request.isNeedMerge()) { // one single call to get all column stats - ColumnStatistics csOld = getMS().getTableColumnStatistics(dbName, tableName, colNames); - Table t = getTable(dbName, tableName); + ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames); + Table t = getTable(catName, dbName, tableName); // we first use t.getParameters() to prune the stats MetaStoreUtils.getMergableCols(firstColStats, t.getParameters()); // we merge those that can be merged @@ -6748,8 +7169,8 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc // a single call to get all column stats for all partitions List partitionNames = new ArrayList<>(); partitionNames.addAll(newStatsMap.keySet()); - List csOlds = getMS().getPartitionColumnStatistics(dbName, tableName, - partitionNames, colNames); + List csOlds = getMS().getPartitionColumnStatistics(catName, dbName, + tableName, partitionNames, colNames); if (newStatsMap.values().size() != csOlds.size()) { // some of the partitions miss stats. 
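The statistics handlers apply the same fallback to ColumnStatisticsDesc, which gained an optional catName; note that update_table_column_statistics and updatePartitonColStats earlier in this diff also write the resolved value back with statsDesc.setCatName(catName), so everything downstream sees a concrete catalog. A hedged sketch of the payload an older, catalog-unaware client would send, assuming the usual Thrift-generated required-fields constructors (the db/table names are placeholders):

    import java.util.ArrayList;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

    // Hypothetical illustration; not code from this patch.
    class StatsPayloadSketch {
      static ColumnStatistics tableLevelPayload() {
        // Table-level descriptor with no catName set, as a pre-catalog client builds it.
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "somedb", "sometbl");
        // A catalog-aware client would add: desc.setCatName("hive");
        // When it is absent, the handlers above substitute getDefaultCatalog(conf).
        return new ColumnStatistics(desc, new ArrayList<ColumnStatisticsObj>());
      }
    }

The handlers also lower-case the db, table, and partition names before writing them back into the descriptor, so the partition-name keys used in the old/new stats maps above stay consistent regardless of how the client capitalized them.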
LOG.debug("Some of the partitions miss stats."); @@ -6758,12 +7179,12 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld); } // another single call to get all the partition objects - partitions = getMS().getPartitionsByNames(dbName, tableName, partitionNames); + partitions = getMS().getPartitionsByNames(catName, dbName, tableName, partitionNames); for (int index = 0; index < partitionNames.size(); index++) { mapToPart.put(partitionNames.get(index), partitions.get(index)); } } - Table t = getTable(dbName, tableName); + Table t = getTable(catName, dbName, tableName); for (Entry entry : newStatsMap.entrySet()) { ColumnStatistics csNew = entry.getValue(); ColumnStatistics csOld = oldStatsMap.get(entry.getKey()); @@ -6788,11 +7209,11 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc return ret; } - private Table getTable(String dbName, String tableName) + private Table getTable(String catName, String dbName, String tableName) throws MetaException, InvalidObjectException { - Table t = getMS().getTable(dbName, tableName); + Table t = getMS().getTable(catName, dbName, tableName); if (t == null) { - throw new InvalidObjectException(dbName + "." + tableName + throw new InvalidObjectException(getCatalogQualifiedTableName(catName, dbName, tableName) + " table not found"); } return t; @@ -6864,9 +7285,10 @@ private void authorizeProxyPrivilege() throws Exception { public FireEventResponse fire_listener_event(FireEventRequest rqst) throws TException { switch (rqst.getData().getSetField()) { case INSERT_DATA: + String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf); InsertEvent event = - new InsertEvent(rqst.getDbName(), rqst.getTableName(), rqst.getPartitionVals(), rqst - .getData().getInsertData(), rqst.isSuccessful(), this); + new InsertEvent(catName, rqst.getDbName(), rqst.getTableName(), rqst.getPartitionVals(), + rqst.getData().getInsertData(), rqst.isSuccessful(), this); /* * The transactional listener response will be set already on the event, so there is not need @@ -6997,7 +7419,7 @@ public CacheFileMetadataResult cache_file_metadata( ms.openTransaction(); boolean success = false; try { - Table tbl = ms.getTable(dbName, tblName); + Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); if (tbl == null) { throw new NoSuchObjectException(dbName + "." 
+ tblName + " not found"); } @@ -7022,7 +7444,7 @@ public CacheFileMetadataResult cache_file_metadata( if (partName != null) { partNames = Lists.newArrayList(partName); } else if (isAllPart) { - partNames = ms.listPartitionNames(dbName, tblName, (short)-1); + partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1); } else { throw new MetaException("Table is partitioned"); } @@ -7035,7 +7457,7 @@ public CacheFileMetadataResult cache_file_metadata( int currentBatchSize = Math.min(batchSize, partNames.size() - index); List nameBatch = partNames.subList(index, index + currentBatchSize); index += currentBatchSize; - List parts = ms.getPartitionsByNames(dbName, tblName, nameBatch); + List parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch); for (Partition part : parts) { if (!part.isSetSd() || !part.getSd().isSetLocation()) { throw new MetaException("Partition does not have storage location;" + @@ -7094,13 +7516,14 @@ void updateMetrics() throws MetaException { @Override public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_primary_keys", db_name, tbl_name); + startTableFunction("get_primary_keys", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getPrimaryKeys(db_name, tbl_name); + ret = getMS().getPrimaryKeys(catName, db_name, tbl_name); } catch (Exception e) { ex = e; throwMetaException(e); @@ -7112,6 +7535,7 @@ public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws T @Override public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String parent_db_name = request.getParent_db_name(); String parent_tbl_name = request.getParent_tbl_name(); String foreign_db_name = request.getForeign_db_name(); @@ -7122,7 +7546,7 @@ public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws T List ret = null; Exception ex = null; try { - ret = getMS().getForeignKeys(parent_db_name, parent_tbl_name, + ret = getMS().getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } catch (Exception e) { ex = e; @@ -7147,13 +7571,14 @@ private void throwMetaException(Exception e) throws MetaException, @Override public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_unique_constraints", db_name, tbl_name); + startTableFunction("get_unique_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getUniqueConstraints(db_name, tbl_name); + ret = getMS().getUniqueConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -7170,13 +7595,14 @@ public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest @Override public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? 
request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_not_null_constraints", db_name, tbl_name); + startTableFunction("get_not_null_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getNotNullConstraints(db_name, tbl_name); + ret = getMS().getNotNullConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -7193,13 +7619,14 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsReq @Override public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_default_constraints", db_name, tbl_name); + startTableFunction("get_default_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getDefaultConstraints(db_name, tbl_name); + ret = getMS().getDefaultConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -7216,13 +7643,14 @@ public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequ @Override public CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_check_constraints", db_name, tbl_name); + startTableFunction("get_check_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getCheckConstraints(db_name, tbl_name); + ret = getMS().getCheckConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -8190,7 +8618,6 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, // Initialize materializations invalidation cache MaterializationsInvalidationCache.get().init(conf, handler); - TServerSocket serverSocket; if (useSasl) { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index ae42077297..be5090d7d9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; import java.io.IOException; import java.lang.reflect.Constructor; @@ -70,6 +72,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TApplicationException; +import org.apache.thrift.TBase; import org.apache.thrift.TException; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; @@ -112,7 +115,6 @@ private URI metastoreUris[]; private final HiveMetaHookLoader hookLoader; protected final Configuration conf; // Keep a copy of
HiveConf so if Session conf changes, we may need to get a new HMS client. - protected boolean fastpath = false; private String tokenStrForm; private final boolean localMetaStore; private final MetaStoreFilterHook filterHook; @@ -379,27 +381,14 @@ public void reconnect() throws MetaException { } } - /** - * @param dbname - * @param tbl_name - * @param new_tbl - * @throws InvalidOperationException - * @throws MetaException - * @throws TException - * @see - * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table( - * java.lang.String, java.lang.String, - * org.apache.hadoop.hive.metastore.api.Table) - */ @Override - public void alter_table(String dbname, String tbl_name, Table new_tbl) - throws InvalidOperationException, MetaException, TException { + public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException { alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); } @Override public void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws InvalidOperationException, MetaException, TException { + boolean cascade) throws TException { EnvironmentContext environmentContext = new EnvironmentContext(); if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); @@ -409,25 +398,29 @@ public void alter_table(String defaultDatabaseName, String tblName, Table table, @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, - EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { - client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + EnvironmentContext envContext) throws TException { + client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf), + tbl_name, new_tbl, envContext); } - /** - * @param dbname - * @param name - * @param part_vals - * @param newPart - * @throws InvalidOperationException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition( - * java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition) - */ @Override - public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) - throws InvalidOperationException, MetaException, TException { - client.rename_partition(dbname, name, part_vals, newPart); + public void alter_table(String catName, String dbName, String tblName, Table newTable, + EnvironmentContext envContext) throws TException { + client.alter_table_with_environment_context(prependCatalogToDbName(catName, + dbName, conf), tblName, newTable, envContext); + } + + @Override + public void renamePartition(final String dbname, final String tableName, final List part_vals, + final Partition newPart) throws TException { + renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart); + } + + @Override + public void renamePartition(String catName, String dbname, String tableName, List part_vals, + Partition newPart) throws TException { + client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart); + } private void open() throws MetaException { @@ -615,6 +608,28 @@ public String getMetaConf(String key) throws TException { return client.getMetaConf(key); } + @Override + public void createCatalog(Catalog catalog) throws TException { + client.create_catalog(new 
CreateCatalogRequest(catalog)); + } + + @Override + public Catalog getCatalog(String catName) throws TException { + GetCatalogResponse rsp = client.get_catalog(new GetCatalogRequest(catName)); + return rsp == null ? null : filterHook.filterCatalog(rsp.getCatalog()); + } + + @Override + public List getCatalogs() throws TException { + GetCatalogsResponse rsp = client.get_catalogs(); + return rsp == null ? null : filterHook.filterCatalogs(rsp.getNames()); + } + + @Override + public void dropCatalog(String catName) throws TException { + client.drop_catalog(new DropCatalogRequest(catName)); + } + /** * @param new_part * @return the added partition @@ -631,8 +646,9 @@ public Partition add_partition(Partition new_part) throws TException { public Partition add_partition(Partition new_part, EnvironmentContext envContext) throws TException { + if (!new_part.isSetCatName()) new_part.setCatName(getDefaultCatalog(conf)); Partition p = client.add_partition_with_environment_context(new_part, envContext); - return fastpath ? p : deepCopy(p); + return deepCopy(p); } /** @@ -645,6 +661,10 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext */ @Override public int add_partitions(List new_parts) throws TException { + if (new_parts != null && !new_parts.isEmpty() && !new_parts.get(0).isSetCatName()) { + final String defaultCat = getDefaultCatalog(conf); + new_parts.forEach(p -> p.setCatName(defaultCat)); + } return client.add_partitions(new_parts); } @@ -657,6 +677,7 @@ public int add_partitions(List new_parts) throws TException { Partition part = parts.get(0); AddPartitionsRequest req = new AddPartitionsRequest( part.getDbName(), part.getTableName(), parts, ifNotExists); + req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf)); req.setNeedResult(needResults); AddPartitionsResult result = client.add_partitions_req(req); return needResults ? filterHook.filterPartitions(result.getPartitions()) : null; @@ -664,45 +685,43 @@ public int add_partitions(List new_parts) throws TException { @Override public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + if (partitionSpec.getCatName() == null) partitionSpec.setCatName(getDefaultCatalog(conf)); return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); } - /** - * @param table_name - * @param db_name - * @param part_vals - * @return the appended partition - * @throws InvalidObjectException - * @throws AlreadyExistsException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, - * java.lang.String, java.util.List) - */ @Override public Partition appendPartition(String db_name, String table_name, List part_vals) throws TException { - return appendPartition(db_name, table_name, part_vals, null); - } - - public Partition appendPartition(String db_name, String table_name, List part_vals, - EnvironmentContext envContext) throws TException { - Partition p = client.append_partition_with_environment_context(db_name, table_name, - part_vals, envContext); - return fastpath ? 
p : deepCopy(p); + return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals); } @Override public Partition appendPartition(String dbName, String tableName, String partName) throws TException { - return appendPartition(dbName, tableName, partName, null); + return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName); } - public Partition appendPartition(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws TException { - Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext); - return fastpath ? p : deepCopy(p); + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + String name) throws TException { + Partition p = client.append_partition_by_name(prependCatalogToDbName( + catName, dbName, conf), tableName, name); + return deepCopy(p); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + List partVals) throws TException { + Partition p = client.append_partition(prependCatalogToDbName( + catName, dbName, conf), tableName, partVals); + return deepCopy(p); + } + + @Deprecated + public Partition appendPartition(String dbName, String tableName, List partVals, + EnvironmentContext ec) throws TException { + return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf), + tableName, partVals, ec).deepCopy(); } /** @@ -715,10 +734,17 @@ public Partition appendPartition(String dbName, String tableName, String partNam @Override public Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, TException { - return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + String destinationTableName) throws TException { + return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, + getDefaultCatalog(conf), destDb, destinationTableName); + } + + @Override + public Partition exchange_partition(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destDb, String destTableName) throws TException { + return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); } /** @@ -731,10 +757,17 @@ public Partition exchange_partition(Map partitionSpecs, @Override public List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, TException { - return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + String destinationTableName) throws TException { + return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, + getDefaultCatalog(conf), destDb, destinationTableName); + } + + @Override + public List exchange_partitions(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destDb, String destTableName) throws TException { + return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); } @Override @@ -755,6 +788,7 @@ 
public void validatePartitionNameCharacters(List partVals) @Override public void createDatabase(Database db) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + if (!db.isSetCatalogName()) db.setCatalogName(getDefaultCatalog(conf)); client.create_database(db); } @@ -773,6 +807,7 @@ public void createTable(Table tbl) throws AlreadyExistsException, public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { + if (!tbl.isSetCatName()) tbl.setCatName(getDefaultCatalog(conf)); HiveMetaHook hook = getHook(tbl); if (hook != null) { hook.preCreateTable(tbl); @@ -797,7 +832,6 @@ public void createTable(Table tbl, EnvironmentContext envContext) throws Already } } - @Override public void createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, @@ -806,6 +840,17 @@ public void createTableWithConstraints(Table tbl, List checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { + + if (!tbl.isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + tbl.setCatName(defaultCat); + if (primaryKeys != null) primaryKeys.forEach(pk -> pk.setCatName(defaultCat)); + if (foreignKeys != null) foreignKeys.forEach(fk -> fk.setCatName(defaultCat)); + if (uniqueConstraints != null) uniqueConstraints.forEach(uc -> uc.setCatName(defaultCat)); + if (notNullConstraints != null) notNullConstraints.forEach(nn -> nn.setCatName(defaultCat)); + if (defaultConstraints != null) defaultConstraints.forEach(def -> def.setCatName(defaultCat)); + if (checkConstraints != null) checkConstraints.forEach(cc -> cc.setCatName(defaultCat)); + } HiveMetaHook hook = getHook(tbl); if (hook != null) { hook.preCreateTable(tbl); @@ -827,44 +872,74 @@ public void createTableWithConstraints(Table tbl, } @Override - public void dropConstraint(String dbName, String tableName, String constraintName) throws - NoSuchObjectException, MetaException, TException { - client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName)); + public void dropConstraint(String dbName, String tableName, String constraintName) + throws TException { + dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName); } @Override - public void addPrimaryKey(List primaryKeyCols) throws - NoSuchObjectException, MetaException, TException { + public void dropConstraint(String catName, String dbName, String tableName, String constraintName) + throws TException { + DropConstraintRequest rqst = new DropConstraintRequest(dbName, tableName, constraintName); + rqst.setCatName(catName); + client.drop_constraint(rqst); + } + + @Override + public void addPrimaryKey(List primaryKeyCols) throws TException { + if (!primaryKeyCols.isEmpty() && !primaryKeyCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + primaryKeyCols.forEach(pk -> pk.setCatName(defaultCat)); + } client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols)); } @Override - public void addForeignKey(List foreignKeyCols) throws - NoSuchObjectException, MetaException, TException { + public void addForeignKey(List foreignKeyCols) throws TException { + if (!foreignKeyCols.isEmpty() && !foreignKeyCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + foreignKeyCols.forEach(fk -> fk.setCatName(defaultCat)); + } client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols)); } @Override 
public void addUniqueConstraint(List uniqueConstraintCols) throws NoSuchObjectException, MetaException, TException { + if (!uniqueConstraintCols.isEmpty() && !uniqueConstraintCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + uniqueConstraintCols.forEach(uc -> uc.setCatName(defaultCat)); + } client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols)); } @Override public void addNotNullConstraint(List notNullConstraintCols) throws NoSuchObjectException, MetaException, TException { + if (!notNullConstraintCols.isEmpty() && !notNullConstraintCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + notNullConstraintCols.forEach(nn -> nn.setCatName(defaultCat)); + } client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols)); } @Override public void addDefaultConstraint(List defaultConstraints) throws NoSuchObjectException, MetaException, TException { + if (!defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + defaultConstraints.forEach(def -> def.setCatName(defaultCat)); + } client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints)); } @Override public void addCheckConstraint(List checkConstraints) throws NoSuchObjectException, MetaException, TException { + if (!checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + checkConstraints.forEach(cc -> cc.setCatName(defaultCat)); + } client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints)); } @@ -893,20 +968,26 @@ public boolean createType(Type type) throws AlreadyExistsException, @Override public void dropDatabase(String name) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { - dropDatabase(name, true, false, false); + dropDatabase(getDefaultCatalog(conf), name, true, false, false); } @Override public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { - dropDatabase(name, deleteData, ignoreUnknownDb, false); + dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, false); } @Override public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, cascade); + } + + public void dropDatabase(String catalogName, String dbName, boolean deleteData, + boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { try { - getDatabase(name); + getDatabase(catalogName, dbName); } catch (NoSuchObjectException e) { if (!ignoreUnknownDb) { throw e; @@ -915,45 +996,30 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD } if (cascade) { - List tableList = getAllTables(name); + List tableList = getAllTables(dbName); for (String table : tableList) { try { // Subclasses can override this step (for example, for temporary tables) - dropTable(name, table, deleteData, true); + dropTable(dbName, table, deleteData, true); } catch (UnsupportedOperationException e) { // Ignore Index tables, those will be dropped with parent tables } } } - client.drop_database(name, deleteData, cascade); - } - - /** - * @param tbl_name - * @param db_name - * @param 
part_vals - * @return true or false - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, - * java.lang.String, java.util.List, boolean) - */ - public boolean dropPartition(String db_name, String tbl_name, - List part_vals) throws NoSuchObjectException, MetaException, - TException { - return dropPartition(db_name, tbl_name, part_vals, true, null); + client.drop_database(prependCatalogToDbName(catalogName, dbName, conf), deleteData, cascade); } - public boolean dropPartition(String db_name, String tbl_name, List part_vals, - EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException { - return dropPartition(db_name, tbl_name, part_vals, true, env_context); + @Override + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) + throws TException { + return dropPartition(getDefaultCatalog(conf), dbName, tableName, partName, deleteData); } @Override - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) - throws NoSuchObjectException, MetaException, TException { - return dropPartition(dbName, tableName, partName, deleteData, null); + public boolean dropPartition(String catName, String db_name, String tbl_name, String name, + boolean deleteData) throws TException { + return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName( + catName, db_name, conf), tbl_name, name, deleteData, null); } private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { @@ -962,54 +1028,57 @@ private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { return new EnvironmentContext(warehouseOptions); } - /* - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge) - throws NoSuchObjectException, MetaException, TException { + // A bunch of these are in HiveMetaStoreClient but not IMetaStoreClient. I have marked these + // as deprecated and not updated them for the catalogs. If we really want to support them we + // should add them to IMetaStoreClient. - return dropPartition(dbName, tableName, partName, deleteData, - ifPurge? 
getEnvironmentContextWithIfPurgeSet() : null); + @Deprecated + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + EnvironmentContext env_context) throws TException { + return client.drop_partition_with_environment_context(prependCatalogToDbName(db_name, conf), + tbl_name, part_vals, true, env_context); } - */ - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, - EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException { - return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, - deleteData, envContext); + @Deprecated + public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData, + EnvironmentContext ec) throws TException { + return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(dbName, conf), + tableName, partName, dropData, ec); + } + + @Deprecated + public boolean dropPartition(String dbName, String tableName, List partVals) + throws TException { + return client.drop_partition(prependCatalogToDbName(dbName, conf), tableName, partVals, true); } - /** - * @param db_name - * @param tbl_name - * @param part_vals - * @param deleteData - * delete the underlying data or just delete the table in metadata - * @return true or false - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, - * java.lang.String, java.util.List, boolean) - */ @Override public boolean dropPartition(String db_name, String tbl_name, - List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException { - return dropPartition(db_name, tbl_name, part_vals, deleteData, null); + List part_vals, boolean deleteData) throws TException { + return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, + PartitionDropOptions.instance().deleteData(deleteData)); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, boolean deleteData) throws TException { + return dropPartition(catName, db_name, tbl_name, part_vals, PartitionDropOptions.instance() + .deleteData(deleteData)); } @Override public boolean dropPartition(String db_name, String tbl_name, - List part_vals, PartitionDropOptions options) throws TException { + List part_vals, PartitionDropOptions options) throws TException { + return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, options); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) + throws TException { if (options == null) { options = PartitionDropOptions.instance(); } - return dropPartition(db_name, tbl_name, part_vals, options.deleteData, - options.purgeData? 
getEnvironmentContextWithIfPurgeSet() : null); - } - - public boolean dropPartition(String db_name, String tbl_name, List part_vals, - boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, - MetaException, TException { if (part_vals != null) { for (String partVal : part_vals) { if (partVal == null) { @@ -1017,32 +1086,17 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ } } } - return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, - envContext); + return client.drop_partition_with_environment_context(prependCatalogToDbName( + catName, db_name, conf), tbl_name, part_vals, options.deleteData, + options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null); } @Override public List dropPartitions(String dbName, String tblName, - List> partExprs, PartitionDropOptions options) + List> partExprs, + PartitionDropOptions options) throws TException { - RequestPartsSpec rps = new RequestPartsSpec(); - List exprs = new ArrayList<>(partExprs.size()); - for (ObjectPair partExpr : partExprs) { - DropPartitionsExpr dpe = new DropPartitionsExpr(); - dpe.setExpr(partExpr.getSecond()); - dpe.setPartArchiveLevel(partExpr.getFirst()); - exprs.add(dpe); - } - rps.setExprs(exprs); - DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); - req.setDeleteData(options.deleteData); - req.setNeedResult(options.returnResults); - req.setIfExists(options.ifExists); - if (options.purgeData) { - LOG.info("Dropped partitions will be purged!"); - req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); - } - return client.drop_partitions_req(req).getPartitions(); + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, options); } @Override @@ -1050,7 +1104,7 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ List> partExprs, boolean deleteData, boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { - return dropPartitions(dbName, tblName, partExprs, + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists) @@ -1063,33 +1117,58 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ List> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException { // By default, we need the results from dropPartitions(); - return dropPartitions(dbName, tblName, partExprs, + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists)); } - /** - * {@inheritDoc} - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ + @Override + public List dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + PartitionDropOptions options) throws TException { + RequestPartsSpec rps = new RequestPartsSpec(); + List exprs = new ArrayList<>(partExprs.size()); + for (ObjectPair partExpr : partExprs) { + DropPartitionsExpr dpe = new DropPartitionsExpr(); + dpe.setExpr(partExpr.getSecond()); + dpe.setPartArchiveLevel(partExpr.getFirst()); + exprs.add(dpe); + } + rps.setExprs(exprs); + DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); + req.setCatName(catName); + req.setDeleteData(options.deleteData); + req.setNeedResult(options.returnResults); + req.setIfExists(options.ifExists); + if (options.purgeData) { + LOG.info("Dropped 
partitions will be purged!"); + req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); + } + return client.drop_partitions_req(req).getPartitions(); + } + @Override public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUnknownTab) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { - dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, null); } - /** - * Drop the table and choose whether to save the data in the trash. - * @param ifPurge completely purge the table (skipping trash) while removing - * data from warehouse - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ @Override public void dropTable(String dbname, String name, boolean deleteData, - boolean ignoreUnknownTab, boolean ifPurge) - throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + boolean ignoreUnknownTab, boolean ifPurge) throws TException { + dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, ifPurge); + } + + @Override + public void dropTable(String dbname, String name) throws TException { + dropTable(getDefaultCatalog(conf), dbname, name, true, true, null); + } + + @Override + public void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable, boolean ifPurge) throws TException { //build new environmentContext with ifPurge; EnvironmentContext envContext = null; if(ifPurge){ @@ -1098,32 +1177,17 @@ public void dropTable(String dbname, String name, boolean deleteData, warehouseOptions.put("ifPurge", "TRUE"); envContext = new EnvironmentContext(warehouseOptions); } - dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); - } - - /** {@inheritDoc} */ - @Override - @Deprecated - public void dropTable(String tableName, boolean deleteData) - throws MetaException, UnknownTableException, TException, NoSuchObjectException { - dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false, null); - } + dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, envContext); - /** - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ - @Override - public void dropTable(String dbname, String name) - throws NoSuchObjectException, MetaException, TException { - dropTable(dbname, name, true, true, null); } /** * Drop the table and choose whether to: delete the underlying table data; * throw if the table doesn't exist; save the data in the trash. 
* - * @param dbname - * @param name + * @param catName catalog name + * @param dbname database name + * @param name table name * @param deleteData * delete the underlying data or just delete the table in metadata * @param ignoreUnknownTab @@ -1141,12 +1205,12 @@ public void dropTable(String dbname, String name) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, * java.lang.String, boolean) */ - public void dropTable(String dbname, String name, boolean deleteData, + public void dropTable(String catName, String dbname, String name, boolean deleteData, boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { Table tbl; try { - tbl = getTable(dbname, name); + tbl = getTable(catName, dbname, name); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { throw e; @@ -1159,7 +1223,7 @@ public void dropTable(String dbname, String name, boolean deleteData, } boolean success = false; try { - drop_table_with_environment_context(dbname, name, deleteData, envContext); + drop_table_with_environment_context(catName, dbname, name, deleteData, envContext); if (hook != null) { hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge")))); } @@ -1175,21 +1239,15 @@ public void dropTable(String dbname, String name, boolean deleteData, } } - /** - * Truncate the table/partitions in the DEFAULT database. - * @param dbName - * The db to which the table to be truncate belongs to - * @param tableName - * The table to truncate - * @param partNames - * List of partitions to truncate. NULL will truncate the whole table/all partitions - * @throws MetaException - * @throws TException - * Could not truncate table properly. 
- */ @Override - public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException { - client.truncate_table(dbName, tableName, partNames); + public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException { + truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames); + } + + @Override + public void truncateTable(String catName, String dbName, String tableName, List<String> partNames) + throws TException { + client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames); } /** @@ -1235,111 +1293,144 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException return result; } - /** {@inheritDoc} */ @Override - public List<String> getDatabases(String databasePattern) - throws MetaException { - try { - return filterHook.filterDatabases(client.get_databases(databasePattern)); - } catch (Exception e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - return null; + public List<String> getDatabases(String databasePattern) throws TException { + return getDatabases(getDefaultCatalog(conf), databasePattern); } - /** {@inheritDoc} */ @Override - public List<String> getAllDatabases() throws MetaException { - try { - return filterHook.filterDatabases(client.get_all_databases()); - } catch (Exception e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - return null; + public List<String> getDatabases(String catName, String databasePattern) throws TException { + return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName( + catName, databasePattern, conf))); } - /** - * @param tbl_name - * @param db_name - * @param max_parts - * @return list of partitions - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - */ @Override - public List<Partition> listPartitions(String db_name, String tbl_name, - short max_parts) throws NoSuchObjectException, MetaException, TException { - List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
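A note for reviewers following the mechanics here: every legacy single-catalog entry point now forwards to a catalog-aware overload, and the catalog reaches the unchanged Thrift endpoints piggybacked on the database-name argument via prependCatalogToDbName. That helper's implementation is not shown in this hunk (it presumably lives alongside getDefaultCatalog in MetaStoreUtils); a minimal sketch of the encoding idea, in which the '@cat#db' marker characters are an assumption for illustration:

    // Sketch only -- not the real helper; marker characters are assumed.
    static String prependCatalogToDbName(String catName, String dbName, Configuration conf) {
      if (catName == null) {
        catName = MetaStoreUtils.getDefaultCatalog(conf); // fall back to the configured catalog
      }
      StringBuilder buf = new StringBuilder("@").append(catName).append('#');
      if (dbName != null) {
        buf.append(dbName.toLowerCase()); // db names are compared case-insensitively
      }
      return buf.toString(); // e.g. "@hive#default"
    }

Encoding the catalog into the existing string slot keeps the wire protocol backward compatible: an old server sees an ordinary (if oddly spelled) database name rather than an unknown Thrift field.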
+ public List<String> getAllDatabases() throws TException { + return getAllDatabases(getDefaultCatalog(conf)); + } + + @Override + public List<String> getAllDatabases(String catName) throws TException { + return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf))); + } + + @Override + public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts) + throws TException { + return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, max_parts); + } + + @Override + public List<Partition> listPartitions(String catName, String db_name, String tbl_name, + int max_parts) throws TException { + List<Partition> parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf), + tbl_name, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { + return listPartitionSpecs(getDefaultCatalog(conf), dbName, tableName, maxParts); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, + int maxParts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_partitions_pspec(dbName, tableName, maxParts))); + client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts))); } @Override public List<Partition> listPartitions(String db_name, String tbl_name, - List<String> part_vals, short max_parts) - throws NoSuchObjectException, MetaException, TException { - List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + List<String> part_vals, short max_parts) throws TException { + return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts); } @Override - public List<Partition> listPartitionsWithAuthInfo(String db_name, - String tbl_name, short max_parts, String user_name, List<String> group_names) - throws NoSuchObjectException, MetaException, TException { - List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + public List<Partition> listPartitions(String catName, String db_name, String tbl_name, + List<String> part_vals, int max_parts) throws TException { + List<Partition> parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf), + tbl_name, part_vals, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name, + short max_parts, String user_name, + List<String> group_names) throws TException { + return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, max_parts, user_name, + group_names); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, + List<String> groupNames) throws TException { + List<Partition> parts = client.get_partitions_with_auth(prependCatalogToDbName(catName, + dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames); + return deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name, + List<String> part_vals, short max_parts, + String user_name, List<String> group_names) + throws TException { + return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts, user_name, group_names); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); }
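One detail worth calling out across these overloads: the catalog-aware signatures widen max_parts from short to int, then clamp back down with shrinkMaxtoShort because the underlying Thrift fields are still short. The helper itself is quoted verbatim near the end of this file; a tiny runnable demo mirroring its contract:

    // Mirrors the shrinkMaxtoShort helper added at the bottom of this file.
    public class MaxPartsDemo {
      static short shrinkMaxtoShort(int max) {
        if (max < 0) return -1;                      // negative means "no limit"
        else if (max <= Short.MAX_VALUE) return (short) max;
        else return Short.MAX_VALUE;                 // saturate rather than overflow
      }

      public static void main(String[] args) {
        System.out.println(shrinkMaxtoShort(-5));    // -1
        System.out.println(shrinkMaxtoShort(100));   // 100
        System.out.println(shrinkMaxtoShort(40000)); // 32767
      }
    }

Saturating at Short.MAX_VALUE instead of casting blindly avoids a silent wrap to a negative value, which the server would interpret as "return everything".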
@Override - public List<Partition> listPartitionsWithAuthInfo(String db_name, - String tbl_name, List<String> part_vals, short max_parts, - String user_name, List<String> group_names) throws NoSuchObjectException, - MetaException, TException { - List<Partition> parts = client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List<String> partialPvals, int maxParts, + String userName, List<String> groupNames) + throws TException { + List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName, + dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } - /** - * Get list of partitions matching specified filter - * @param db_name the database name - * @param tbl_name the table name - * @param filter the filter string, - * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can - * be done only on string partition keys. - * @param max_parts the maximum number of partitions to return, - * all partitions are returned if -1 is passed - * @return list of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - */ @Override public List<Partition> listPartitionsByFilter(String db_name, String tbl_name, - String filter, short max_parts) throws MetaException, - NoSuchObjectException, TException { - List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + String filter, short max_parts) throws TException { + return listPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts); + } + + @Override + public List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) throws TException { + List<Partition> parts = client.get_partitions_by_filter(prependCatalogToDbName( + catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, - String filter, int max_parts) throws MetaException, - NoSuchObjectException, TException { + String filter, int max_parts) + throws TException { + return listPartitionSpecsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, + String tbl_name, String filter, + int max_parts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter, + max_parts))); } @Override public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, - String default_partition_name, short max_parts, List<Partition> result) + String default_partition_name, short max_parts, + List<Partition> result) throws TException { + return listPartitionsByExpr(getDefaultCatalog(conf), db_name, tbl_name, expr, + default_partition_name, max_parts, result); + } + + @Override + public boolean listPartitionsByExpr(String catName, String
db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, List result) throws TException { assert result != null; PartitionsByExprRequest req = new PartitionsByExprRequest( @@ -1348,7 +1439,7 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr req.setDefaultPartitionName(default_partition_name); } if (max_parts >= 0) { - req.setMaxParts(max_parts); + req.setMaxParts(shrinkMaxtoShort(max_parts)); } PartitionsByExprResult r; try { @@ -1362,132 +1453,138 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr throw new IncompatibleMetastoreException( "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); } - if (fastpath) { - result.addAll(r.getPartitions()); - } else { - r.setPartitions(filterHook.filterPartitions(r.getPartitions())); - // TODO: in these methods, do we really need to deepcopy? - deepCopyPartitions(r.getPartitions(), result); - } + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. } - /** - * @param name - * @return the database - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String) - */ @Override - public Database getDatabase(String name) throws NoSuchObjectException, - MetaException, TException { - Database d = client.get_database(name); - return fastpath ? d :deepCopy(filterHook.filterDatabase(d)); + public Database getDatabase(String name) throws TException { + return getDatabase(getDefaultCatalog(conf), name); + } + + @Override + public Database getDatabase(String catalogName, String databaseName) throws TException { + Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf)); + return deepCopy(filterHook.filterDatabase(d)); } - /** - * @param tbl_name - * @param db_name - * @param part_vals - * @return the partition - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, - * java.lang.String, java.util.List) - */ @Override - public Partition getPartition(String db_name, String tbl_name, - List part_vals) throws NoSuchObjectException, MetaException, TException { - Partition p = client.get_partition(db_name, tbl_name, part_vals); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + public Partition getPartition(String db_name, String tbl_name, List part_vals) + throws TException { + return getPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, + List partVals) throws TException { + Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals); + return deepCopy(filterHook.filterPartition(p)); } @Override public List getPartitionsByNames(String db_name, String tbl_name, - List part_names) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); - return fastpath ? 
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + List part_names) throws TException { + return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names); + } + + @Override + public List getPartitionsByNames(String catName, String db_name, String tbl_name, + List part_names) throws TException { + List parts = + client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf), tbl_name, part_names); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) throws MetaException, TException, NoSuchObjectException { + if (!request.isSetCatName()) request.setCatName(getDefaultCatalog(conf)); return client.get_partition_values(request); } @Override public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, List part_vals, String user_name, List group_names) - throws MetaException, UnknownTableException, NoSuchObjectException, - TException { - Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, - group_names); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + throws TException { + return getPartitionWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, + user_name, group_names); + } + + @Override + public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + List pvals, String userName, + List groupNames) throws TException { + Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName, + pvals, userName, groupNames); + return deepCopy(filterHook.filterPartition(p)); } - /** - * @param name - * @param dbname - * @return the table - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @throws NoSuchObjectException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, - * java.lang.String) - */ @Override - public Table getTable(String dbname, String name) throws MetaException, - TException, NoSuchObjectException { - GetTableRequest req = new GetTableRequest(dbname, name); + public Table getTable(String dbname, String name) throws TException { + return getTable(getDefaultCatalog(conf), dbname, name); + } + + @Override + public Table getTable(String catName, String dbName, String tableName) throws TException { + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCatName(catName); req.setCapabilities(version); Table t = client.get_table_req(req).getTable(); - return fastpath ? t : deepCopy(filterHook.filterTable(t)); + return deepCopy(filterHook.filterTable(t)); } - /** {@inheritDoc} */ @Override - @Deprecated - public Table getTable(String tableName) throws MetaException, TException, - NoSuchObjectException { - Table t = getTable(DEFAULT_DATABASE_NAME, tableName); - return fastpath ? t : filterHook.filterTable(t); + public List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) + throws TException { + return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames); } - /** {@inheritDoc} */ @Override - public List<Table> getTableObjectsByName(String dbName, List<String> tableNames) - throws MetaException, InvalidOperationException, UnknownDBException, TException { + public List<Table> getTableObjectsByName(String catName, String dbName, + List<String> tableNames) throws TException { GetTablesRequest req = new GetTablesRequest(dbName); + req.setCatName(catName); + req.setTblNames(tableNames); + req.setCapabilities(version); List<Table>
tabs = client.get_table_objects_by_name_req(req).getTables(); - return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); + return deepCopyTables(filterHook.filterTables(tabs)); } - /** {@inheritDoc} */ @Override public Map getMaterializationsInvalidationInfo(String dbName, List viewNames) throws MetaException, InvalidOperationException, UnknownDBException, TException { return client.get_materialization_invalidation_info( - dbName, filterHook.filterTableNames(dbName, viewNames)); + dbName, filterHook.filterTableNames(getDefaultCatalog(conf), dbName, viewNames)); } - /** {@inheritDoc} */ @Override public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws MetaException, InvalidOperationException, UnknownDBException, TException { - client.update_creation_metadata(dbName, tableName, cm); + client.update_creation_metadata(getDefaultCatalog(conf), dbName, tableName, cm); + } + + @Override + public void updateCreationMetadata(String catName, String dbName, String tableName, + CreationMetadata cm) throws MetaException, TException { + client.update_creation_metadata(catName, dbName, tableName, cm); + } /** {@inheritDoc} */ @Override public List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException, TException, InvalidOperationException, UnknownDBException { - return filterHook.filterTableNames(dbName, - client.get_table_names_by_filter(dbName, filter, maxTables)); + throws TException { + return listTableNamesByFilter(getDefaultCatalog(conf), dbName, filter, maxTables); + } + + @Override + public List listTableNamesByFilter(String catName, String dbName, String filter, + int maxTables) throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter, + shrinkMaxtoShort(maxTables))); } /** @@ -1502,34 +1599,52 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE return deepCopy(client.get_type(name)); } - /** {@inheritDoc} */ @Override public List getTables(String dbname, String tablePattern) throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern)); + return getTables(getDefaultCatalog(conf), dbname, tablePattern); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - /** {@inheritDoc} */ + @Override + public List getTables(String catName, String dbName, String tablePattern) + throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern)); + } + @Override public List getTables(String dbname, String tablePattern, TableType tableType) throws MetaException { try { - return filterHook.filterTableNames(dbname, - client.get_tables_by_type(dbname, tablePattern, tableType.toString())); + return getTables(getDefaultCatalog(conf), dbname, tablePattern, tableType); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - /** {@inheritDoc} */ @Override - public List getMaterializedViewsForRewriting(String dbname) throws MetaException { + public List getTables(String catName, String dbName, String tablePattern, + TableType tableType) throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern, + tableType.toString())); + } + + @Override + public List 
getMaterializedViewsForRewriting(String dbName) throws TException { + return getMaterializedViewsForRewriting(getDefaultCatalog(conf), dbName); + } + + @Override + public List getMaterializedViewsForRewriting(String catName, String dbname) + throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_materialized_views_for_rewriting(dbname)); + return filterHook.filterTableNames(catName, dbname, + client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf))); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1540,38 +1655,24 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE public List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException { try { - return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes)); + return getTableMeta(getDefaultCatalog(conf), dbPatterns, tablePatterns, tableTypes); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - private List filterNames(List metas) throws MetaException { - Map sources = new LinkedHashMap<>(); - Map> dbTables = new LinkedHashMap<>(); - for (TableMeta meta : metas) { - sources.put(meta.getDbName() + "." + meta.getTableName(), meta); - List tables = dbTables.get(meta.getDbName()); - if (tables == null) { - dbTables.put(meta.getDbName(), tables = new ArrayList<>()); - } - tables.add(meta.getTableName()); - } - List filtered = new ArrayList<>(); - for (Map.Entry> entry : dbTables.entrySet()) { - for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) { - filtered.add(sources.get(entry.getKey() + "." + table)); - } - } - return filtered; + @Override + public List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) throws TException { + return filterHook.filterTableMetas(client.get_table_meta(prependCatalogToDbName( + catName, dbPatterns, conf), tablePatterns, tableTypes)); } - /** {@inheritDoc} */ @Override public List getAllTables(String dbname) throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_all_tables(dbname)); + return getAllTables(getDefaultCatalog(conf), dbname); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1579,10 +1680,21 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE } @Override - public boolean tableExists(String databaseName, String tableName) throws MetaException, - TException, UnknownDBException { + public List getAllTables(String catName, String dbName) throws TException { + return filterHook.filterTableNames(catName, dbName, client.get_all_tables( + prependCatalogToDbName(catName, dbName, conf))); + } + + @Override + public boolean tableExists(String databaseName, String tableName) throws TException { + return tableExists(getDefaultCatalog(conf), databaseName, tableName); + } + + @Override + public boolean tableExists(String catName, String dbName, String tableName) throws TException { try { - GetTableRequest req = new GetTableRequest(databaseName, tableName); + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCatName(catName); req.setCapabilities(version); return filterHook.filterTable(client.get_table_req(req).getTable()) != null; } catch (NoSuchObjectException e) { @@ -1590,156 +1702,167 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc } } - /** {@inheritDoc} */ @Override - @Deprecated - public boolean 
tableExists(String tableName) throws MetaException, - TException, UnknownDBException { - return tableExists(DEFAULT_DATABASE_NAME, tableName); + public List listPartitionNames(String dbName, String tblName, + short max) throws NoSuchObjectException, MetaException, TException { + return listPartitionNames(getDefaultCatalog(conf), dbName, tblName, max); } @Override - public List listPartitionNames(String dbName, String tblName, - short max) throws NoSuchObjectException, MetaException, TException { - return filterHook.filterPartitionNames(dbName, tblName, - client.get_partition_names(dbName, tblName, max)); + public List listPartitionNames(String catName, String dbName, String tableName, + int maxParts) throws TException { + return filterHook.filterPartitionNames(catName, dbName, tableName, + client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts))); } @Override public List listPartitionNames(String db_name, String tbl_name, - List part_vals, short max_parts) - throws MetaException, TException, NoSuchObjectException { - return filterHook.filterPartitionNames(db_name, tbl_name, - client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + List part_vals, short max_parts) throws TException { + return listPartitionNames(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws TException { + return filterHook.filterPartitionNames(catName, db_name, tbl_name, + client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name, + part_vals, shrinkMaxtoShort(max_parts))); } - /** - * Get number of partitions matching specified filter - * @param db_name the database name - * @param tbl_name the table name - * @param filter the filter string, - * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can - * be done only on string partition keys. 
- * @return number of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - */ @Override public int getNumPartitionsByFilter(String db_name, String tbl_name, - String filter) throws MetaException, - NoSuchObjectException, TException { - return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + String filter) throws TException { + return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter); + } + + @Override + public int getNumPartitionsByFilter(String catName, String dbName, String tableName, + String filter) throws TException { + return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName, + filter); } @Override public void alter_partition(String dbName, String tblName, Partition newPart) throws InvalidOperationException, MetaException, TException { - client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, null); } @Override public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { - client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); + alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext); + } + + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) throws TException { + client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName, + newPart, environmentContext); } @Override public void alter_partitions(String dbName, String tblName, List newParts) - throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + throws TException { + alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null); } @Override - public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); -} + public void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext) throws TException { + alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext); + } @Override - public void alterDatabase(String dbName, Database db) - throws MetaException, NoSuchObjectException, TException { - client.alter_database(dbName, db); + public void alter_partitions(String catName, String dbName, String tblName, + List newParts, + EnvironmentContext environmentContext) throws TException { + client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf), + tblName, newParts, environmentContext); } - /** - * @param db - * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, - * java.lang.String) - */ + @Override - public List getFields(String db, String tableName) - throws MetaException, TException, UnknownTableException, - UnknownDBException { - List fields = client.get_fields(db, tableName); - return fastpath 
? fields : deepCopyFieldSchemas(fields); + public void alterDatabase(String dbName, Database db) throws TException { + alterDatabase(getDefaultCatalog(conf), dbName, db); } @Override - public List getPrimaryKeys(PrimaryKeysRequest req) - throws MetaException, NoSuchObjectException, TException { + public void alterDatabase(String catName, String dbName, Database newDb) throws TException { + client.alter_database(prependCatalogToDbName(catName, dbName, conf), newDb); + } + + @Override + public List getFields(String db, String tableName) throws TException { + return getFields(getDefaultCatalog(conf), db, tableName); + } + + @Override + public List getFields(String catName, String db, String tableName) + throws TException { + List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName); + return deepCopyFieldSchemas(fields); + } + + @Override + public List getPrimaryKeys(PrimaryKeysRequest req) throws TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_primary_keys(req).getPrimaryKeys(); } @Override public List getForeignKeys(ForeignKeysRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_foreign_keys(req).getForeignKeys(); } @Override public List getUniqueConstraints(UniqueConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_unique_constraints(req).getUniqueConstraints(); } @Override public List getNotNullConstraints(NotNullConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_not_null_constraints(req).getNotNullConstraints(); } @Override public List getDefaultConstraints(DefaultConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_default_constraints(req).getDefaultConstraints(); } @Override public List getCheckConstraints(CheckConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_check_constraints(req).getCheckConstraints(); } /** {@inheritDoc} */ @Override - @Deprecated - //use setPartitionColumnStatistics instead - public boolean updateTableColumnStatistics(ColumnStatistics statsObj) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TException { + if (!statsObj.getStatsDesc().isSetCatName()) statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); return client.update_table_column_statistics(statsObj); } - /** {@inheritDoc} */ @Override - @Deprecated - //use setPartitionColumnStatistics instead - public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws TException { + if (!statsObj.getStatsDesc().isSetCatName()) statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); return client.update_partition_column_statistics(statsObj); } - /** {@inheritDoc} */ @Override - public boolean 
setPartitionColumnStatistics(SetPartitionsStatsRequest request) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws TException { + String defaultCat = getDefaultCatalog(conf); + for (ColumnStatistics stats : request.getColStats()) { + if (!stats.getStatsDesc().isSetCatName()) stats.getStatsDesc().setCatName(defaultCat); + } return client.set_aggr_stats_for(request); } @@ -1753,66 +1876,84 @@ public void flushCache() { } } - /** {@inheritDoc} */ @Override public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, - List<String> colNames) throws NoSuchObjectException, MetaException, TException, - InvalidInputException, InvalidObjectException { - return client.get_table_statistics_req( - new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + List<String> colNames) throws TException { + return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames); + } + + @Override + public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, + String tableName, + List<String> colNames) throws TException { + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + rqst.setCatName(catName); + return client.get_table_statistics_req(rqst).getTableStats(); } - /** {@inheritDoc} */ @Override public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics( String dbName, String tableName, List<String> partNames, List<String> colNames) - throws NoSuchObjectException, MetaException, TException { - return client.get_partitions_statistics_req( - new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + throws TException { + return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partNames, colNames); + } + + @Override + public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List<String> partNames, + List<String> colNames) throws TException { + PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, + partNames); + rqst.setCatName(catName); + return client.get_partitions_statistics_req(rqst).getPartStats(); } - /** {@inheritDoc} */ @Override public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, - TException, InvalidInputException - { - return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + String colName) throws TException { + return deletePartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partName, + colName); + } + + @Override + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) + throws TException { + return client.delete_partition_column_statistics(prependCatalogToDbName(catName, dbName, conf), + tableName, partName, colName); } - /** {@inheritDoc} */ @Override public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException - { - return client.delete_table_column_statistics(dbName, tableName, colName); + throws TException { + return deleteTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colName); + } + + @Override + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws TException { + return client.delete_table_column_statistics(prependCatalogToDbName(catName, dbName, conf), + tableName, colName); + }
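Note the split in transport styles in the statistics methods above: name-based Thrift calls smuggle the catalog through prependCatalogToDbName, while struct-based calls (TableStatsRequest, PartitionsStatsRequest, SetPartitionsStatsRequest) carry it in a first-class catName field that is defaulted when unset. A short usage sketch against the new catalog-aware surface; the catalog, database, table, and column names are invented for the example:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.thrift.TException;

    class StatsUsageExample {
      // "audit_cat", "web", "clicks", and the column names are illustrative only.
      static List<ColumnStatisticsObj> statsFromNamedCatalog(IMetaStoreClient msc) throws TException {
        return msc.getTableColumnStatistics("audit_cat", "web", "clicks",
            Arrays.asList("user_id", "ts"));
      }
    }

Defaulting the catName on the request object server-bound, rather than requiring callers to set it, is what lets existing non-catalog-aware clients keep working unmodified.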
+ + @Override + public List<FieldSchema> getSchema(String db, String tableName) throws TException { + return getSchema(getDefaultCatalog(conf), db, tableName); } - /** - * @param db - * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, - * java.lang.String) - */ @Override - public List<FieldSchema> getSchema(String db, String tableName) - throws MetaException, TException, UnknownTableException, - UnknownDBException { - EnvironmentContext envCxt = null; - String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); - if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { - Map<String, String> props = new HashMap<String, String>(); - props.put("hive.added.jars.path", addedJars); - envCxt = new EnvironmentContext(props); - } + public List<FieldSchema> getSchema(String catName, String db, String tableName) throws TException { + EnvironmentContext envCxt = null; + String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); + if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + Map<String, String> props = new HashMap<>(); + props.put("hive.added.jars.path", addedJars); + envCxt = new EnvironmentContext(props); + } - List<FieldSchema> fields = client.get_schema_with_environment_context(db, tableName, envCxt); - return fastpath ? fields : deepCopyFieldSchemas(fields); + List<FieldSchema> fields = client.get_schema_with_environment_context(prependCatalogToDbName( + catName, db, conf), tableName, envCxt); + return deepCopyFieldSchemas(fields); } @Override @@ -1822,10 +1963,16 @@ public String getConfigValue(String name, String defaultValue) } @Override - public Partition getPartition(String db, String tableName, String partName) - throws MetaException, TException, UnknownTableException, NoSuchObjectException { - Partition p = client.get_partition_by_name(db, tableName, partName); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + public Partition getPartition(String db, String tableName, String partName) throws TException { + return getPartition(getDefaultCatalog(conf), db, tableName, partName); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, String name) + throws TException { + Partition p = client.get_partition_by_name(prependCatalogToDbName(catName, dbName, conf), tblName, + name); + return deepCopy(filterHook.filterPartition(p)); } public Partition appendPartitionByName(String dbName, String tableName, String partName) @@ -1838,7 +1985,7 @@ public Partition appendPartitionByName(String dbName, String tableName, String p MetaException, TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, partName, envContext); - return fastpath ?
p : deepCopy(p); + return deepCopy(p); } public boolean dropPartitionByName(String dbName, String tableName, String partName, @@ -2026,6 +2173,10 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( @Override public boolean grant_privileges(PrivilegeBag privileges) throws MetaException, TException { + String defaultCat = getDefaultCatalog(conf); + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + if (!priv.getHiveObject().isSetCatName()) priv.getHiveObject().setCatName(defaultCat); + } GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); req.setRequestType(GrantRevokeType.GRANT); req.setPrivileges(privileges); @@ -2055,6 +2206,10 @@ public boolean revoke_role(String roleName, String userName, @Override public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, TException { + String defaultCat = getDefaultCatalog(conf); + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + if (!priv.getHiveObject().isSetCatName()) priv.getHiveObject().setCatName(defaultCat); + } GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); req.setRequestType(GrantRevokeType.REVOKE); req.setPrivileges(privileges); @@ -2070,6 +2225,7 @@ public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) t public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List groupNames) throws MetaException, TException { + if (!hiveObject.isSetCatName()) hiveObject.setCatName(getDefaultCatalog(conf)); return client.get_privilege_set(hiveObject, userName, groupNames); } @@ -2077,6 +2233,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, public List list_privileges(String principalName, PrincipalType principalType, HiveObjectRef hiveObject) throws MetaException, TException { + if (!hiveObject.isSetCatName()) hiveObject.setCatName(getDefaultCatalog(conf)); return client.list_privileges(principalName, principalType, hiveObject); } @@ -2392,12 +2549,14 @@ public CurrentNotificationEventId getCurrentNotificationEventId() throws TExcept @Override public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) throws TException { + if (!rqst.isSetCatName()) rqst.setCatName(getDefaultCatalog(conf)); return client.get_notification_events_count(rqst); } @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) @Override public FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException { + if (!rqst.isSetCatName()) rqst.setCatName(getDefaultCatalog(conf)); return client.fire_listener_event(rqst); } @@ -2438,60 +2597,83 @@ public synchronized Object invoke(Object proxy, Method method, Object [] args) @Override public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) - throws MetaException, TException, NoSuchObjectException, UnknownDBException, - UnknownTableException, - InvalidPartitionException, UnknownPartitionException { - assert db_name != null; - assert tbl_name != null; - assert partKVs != null; - client.markPartitionForEvent(db_name, tbl_name, partKVs, eventType); + throws TException { + markPartitionForEvent(getDefaultCatalog(conf), db_name, tbl_name, partKVs, eventType); + } + + @Override + public void markPartitionForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws TException { + client.markPartitionForEvent(prependCatalogToDbName(catName, db_name, conf), tbl_name, 
partKVs, + eventType); + } @Override public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) - throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException, - InvalidPartitionException, UnknownPartitionException { - assert db_name != null; - assert tbl_name != null; - assert partKVs != null; - return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); + throws TException { + return isPartitionMarkedForEvent(getDefaultCatalog(conf), db_name, tbl_name, partKVs, eventType); } @Override - public void createFunction(Function func) throws InvalidObjectException, - MetaException, TException { + public boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws TException { + return client.isPartitionMarkedForEvent(prependCatalogToDbName(catName, db_name, conf), tbl_name, + partKVs, eventType); + } + + @Override + public void createFunction(Function func) throws TException { + if (!func.isSetCatName()) func.setCatName(getDefaultCatalog(conf)); client.create_function(func); } @Override public void alterFunction(String dbName, String funcName, Function newFunction) - throws InvalidObjectException, MetaException, TException { - client.alter_function(dbName, funcName, newFunction); + throws TException { + alterFunction(getDefaultCatalog(conf), dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) - throws MetaException, NoSuchObjectException, InvalidObjectException, - InvalidInputException, TException { - client.drop_function(dbName, funcName); + public void alterFunction(String catName, String dbName, String funcName, + Function newFunction) throws TException { + client.alter_function(prependCatalogToDbName(catName, dbName, conf), funcName, newFunction); } @Override - public Function getFunction(String dbName, String funcName) - throws MetaException, TException { - Function f = client.get_function(dbName, funcName); - return fastpath ? 
f : deepCopy(f); + public void dropFunction(String dbName, String funcName) throws TException { + dropFunction(getDefaultCatalog(conf), dbName, funcName); } @Override - public List getFunctions(String dbName, String pattern) - throws MetaException, TException { - return client.get_functions(dbName, pattern); + public void dropFunction(String catName, String dbName, String funcName) throws TException { + client.drop_function(prependCatalogToDbName(catName, dbName, conf), funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) throws TException { + return getFunction(getDefaultCatalog(conf), dbName, funcName); + } + + @Override + public Function getFunction(String catName, String dbName, String funcName) throws TException { + return deepCopy(client.get_function(prependCatalogToDbName(catName, dbName, conf), funcName)); + } + + @Override + public List getFunctions(String dbName, String pattern) throws TException { + return getFunctions(getDefaultCatalog(conf), dbName, pattern); + } + + @Override + public List getFunctions(String catName, String dbName, String pattern) throws TException { + return client.get_functions(prependCatalogToDbName(catName, dbName, conf), pattern); } @Override - public GetAllFunctionsResponse getAllFunctions() - throws MetaException, TException { + public GetAllFunctionsResponse getAllFunctions() throws TException { return client.get_all_functions(); } @@ -2501,20 +2683,27 @@ protected void create_table_with_environment_context(Table tbl, EnvironmentConte client.create_table_with_environment_context(tbl, envContext); } - protected void drop_table_with_environment_context(String dbname, String name, - boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, - NoSuchObjectException, UnsupportedOperationException { - client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + protected void drop_table_with_environment_context(String catName, String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws TException { + client.drop_table_with_environment_context(prependCatalogToDbName(catName, dbname, conf), + name, deleteData, envContext); } @Override public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { + return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, partNames); + } + + @Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames) throws TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setCatName(catName); return client.get_aggr_stats_for(req); } @@ -2818,47 +3007,48 @@ public void createOrDropTriggerToPoolMapping(String resourcePlanName, String tri } public void createISchema(ISchema schema) throws TException { + if (!schema.isSetCatName()) schema.setCatName(getDefaultCatalog(conf)); client.create_ischema(schema); } @Override - public void alterISchema(String dbName, String schemaName, ISchema newSchema) throws TException { - client.alter_ischema(new AlterISchemaRequest(new ISchemaName(dbName, schemaName), newSchema)); + public void alterISchema(String catName, String dbName, String 
schemaName, ISchema newSchema) throws TException { + client.alter_ischema(new AlterISchemaRequest(new ISchemaName(catName, dbName, schemaName), newSchema)); } @Override - public ISchema getISchema(String dbName, String name) throws TException { - return client.get_ischema(new ISchemaName(dbName, name)); + public ISchema getISchema(String catName, String dbName, String name) throws TException { + return client.get_ischema(new ISchemaName(catName, dbName, name)); } @Override - public void dropISchema(String dbName, String name) throws TException { - client.drop_ischema(new ISchemaName(dbName, name)); + public void dropISchema(String catName, String dbName, String name) throws TException { + client.drop_ischema(new ISchemaName(catName, dbName, name)); } @Override public void addSchemaVersion(SchemaVersion schemaVersion) throws TException { + if (!schemaVersion.getSchema().isSetCatName()) schemaVersion.getSchema().setCatName(getDefaultCatalog(conf)); client.add_schema_version(schemaVersion); } - @Override - public SchemaVersion getSchemaVersion(String dbName, String schemaName, int version) throws TException { - return client.get_schema_version(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + public SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException { + return client.get_schema_version(new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version)); } @Override - public SchemaVersion getSchemaLatestVersion(String dbName, String schemaName) throws TException { - return client.get_schema_latest_version(new ISchemaName(dbName, schemaName)); + public SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException { + return client.get_schema_latest_version(new ISchemaName(catName, dbName, schemaName)); } @Override - public List getSchemaAllVersions(String dbName, String schemaName) throws TException { - return client.get_schema_all_versions(new ISchemaName(dbName, schemaName)); + public List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException { + return client.get_schema_all_versions(new ISchemaName(catName, dbName, schemaName)); } @Override - public void dropSchemaVersion(String dbName, String schemaName, int version) throws TException { - client.drop_schema_version(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + public void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException { + client.drop_schema_version(new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version)); } @Override @@ -2867,17 +3057,17 @@ public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws } @Override - public void mapSchemaVersionToSerde(String dbName, String schemaName, int version, String serdeName) + public void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException { client.map_schema_version_to_serde(new MapSchemaVersionToSerdeRequest( - new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version), serdeName)); + new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version), serdeName)); } @Override - public void setSchemaVersionState(String dbName, String schemaName, int version, SchemaVersionState state) + public void setSchemaVersionState(String catName, String dbName, String schemaName, int version, 
SchemaVersionState state) throws TException { client.set_schema_version_state(new SetSchemaVersionStateRequest(new SchemaVersionDescriptor( - new ISchemaName(dbName, schemaName), version), state)); + new ISchemaName(catName, dbName, schemaName), version), state)); } @Override @@ -2889,4 +3079,10 @@ public void addSerDe(SerDeInfo serDeInfo) throws TException { public SerDeInfo getSerDe(String serDeName) throws TException { return client.get_serde(new GetSerdeRequest(serDeName)); } + + private short shrinkMaxtoShort(int max) { + if (max < 0) return -1; + else if (max <= Short.MAX_VALUE) return (short)max; + else return Short.MAX_VALUE; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index e6de001000..f59f40bc33 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * An interface wrapper for HMSHandler. This interface contains methods that need to be @@ -66,25 +67,43 @@ /** * Equivalent to get_database, but does not write to audit logs, or fire pre-event listeners. * Meant to be used for internal hive classes that don't use the thrift interface. + * @param catName catalog name * @param name database name * @return database object * @throws NoSuchObjectException If the database does not exist. * @throws MetaException If another error occurs. */ - Database get_database_core(final String name) throws NoSuchObjectException, MetaException; + Database get_database_core(final String catName, final String name) + throws NoSuchObjectException, MetaException; /** * Equivalent of get_table, but does not log audits and fire pre-event listener. * Meant to be used for calls made by other hive classes, that are not using the * thrift interface. + * @param catName catalog name * @param dbname database name * @param name table name * @return Table object * @throws NoSuchObjectException If the table does not exist. * @throws MetaException If another error occurs. */ - Table get_table_core(final String dbname, final String name) throws MetaException, - NoSuchObjectException; + Table get_table_core(final String catName, final String dbname, final String name) + throws MetaException, NoSuchObjectException; + + /** + * Equivalent of get_table, but does not log audits and fire pre-event listener. + * Meant to be used for calls made by other hive classes, that are not using the + * thrift interface. Uses the configured catalog. + * @param dbName database name + * @param name table name + * @return Table object + * @throws NoSuchObjectException If the table does not exist. + * @throws MetaException If another error occurs. + */ + default Table get_table_core(final String dbName, final String name) + throws MetaException, NoSuchObjectException { + return get_table_core(MetaStoreUtils.getDefaultCatalog(getConf()), dbName, name); + } /** * Get a list of all transactional listeners. 
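
Editor's illustrative sketch, not part of the patch: the pattern above recurs throughout this change. Every pre-catalog entry point keeps its old signature and forwards to a catalog-qualified overload, filling in the default catalog from the configuration via MetaStoreUtils.getDefaultCatalog (which this patch calls in IHMSHandler above). A minimal, self-contained rendering of that shape, with hypothetical interface and method names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

interface CatalogAwareLookup<T> {
  Configuration getConf();

  // The catalog-qualified primitive that implementations actually provide.
  T lookup(String catName, String dbName, String objectName) throws Exception;

  // Legacy entry point: resolve the default catalog, then delegate.
  default T lookup(String dbName, String objectName) throws Exception {
    String catName = MetaStoreUtils.getDefaultCatalog(getConf());
    return lookup(catName, dbName, objectName);
  }
}
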
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index c69192b731..2e146f3286 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; @@ -37,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -125,6 +127,7 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.thrift.TException; @@ -176,77 +179,219 @@ String getMetaConf(String key) throws MetaException, TException; /** - * Get the names of all databases in the MetaStore that match the given pattern. - * @param databasePattern + * Create a new catalog. + * @param catalog catalog object to create. + * @throws AlreadyExistsException A catalog of this name already exists. + * @throws InvalidObjectException There is something wrong with the passed in catalog object. + * @throws MetaException something went wrong, usually either in the database or trying to + * create the directory for the catalog. + * @throws TException general thrift exception. + */ + void createCatalog(Catalog catalog) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException; + + /** + * Get a catalog object. + * @param catName Name of the catalog to fetch. + * @return The catalog. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException; + + /** + * Get a list of all catalogs known to the system. + * @return list of catalog names + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + List getCatalogs() throws MetaException, TException; + + /** + * Drop a catalog. Catalogs must be empty to be dropped, there is no cascade for dropping a + * catalog. + * @param catName name of the catalog to drop + * @throws NoSuchObjectException no catalog of this name exists. + * @throws InvalidOperationException The catalog is not empty and cannot be dropped. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. 
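+   * <p>Illustrative lifecycle (an editor's sketch, not part of the original patch; assumes an
+   * {@code IMetaStoreClient} instance {@code msc} and a hypothetical catalog named "test_cat"):
+   * <pre>{@code
+   *   Catalog cat = new Catalog();
+   *   cat.setName("test_cat");
+   *   cat.setLocationUri("/warehouse/test_cat");
+   *   msc.createCatalog(cat);
+   *   msc.getCatalogs();                       // now includes "test_cat"
+   *   msc.getDatabases("test_cat", "*");       // databases within the catalog
+   *   msc.dropCatalog("test_cat");             // only succeeds while the catalog is empty
+   * }</pre>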
+   */
+  void dropCatalog(String catName)
+      throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+
+  /**
+   * Get the names of all databases in the default catalog that match the given pattern.
+   * @param databasePattern pattern for the database name to match
    * @return List of database names.
-   * @throws MetaException
-   * @throws TException
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
    */
   List<String> getDatabases(String databasePattern) throws MetaException, TException;

   /**
+   * Get all databases in a catalog whose names match a pattern.
+   * @param catName catalog name. Can be null, in which case the default catalog is assumed.
+   * @param databasePattern pattern for the database name to match
+   * @return list of database names
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
+   */
+  List<String> getDatabases(String catName, String databasePattern)
+      throws MetaException, TException;
+
+  /**
    * Get the names of all databases in the MetaStore.
-   * @return List of database names.
-   * @throws MetaException
-   * @throws TException
+   * @return List of database names in the default catalog.
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
    */
   List<String> getAllDatabases() throws MetaException, TException;

   /**
+   * Get all databases in a catalog.
+   * @param catName catalog name. Can be null, in which case the default catalog is assumed.
+   * @return list of all database names
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
+   */
+  List<String> getAllDatabases(String catName) throws MetaException, TException;
+
+  /**
    * Get the names of all tables in the specified database that satisfy the supplied
    * table name pattern.
-   * @param dbName
-   * @param tablePattern
+   * @param dbName database name.
+   * @param tablePattern pattern for table name to conform to
    * @return List of table names.
-   * @throws MetaException
-   * @throws TException
-   * @throws UnknownDBException
+   * @throws MetaException error fetching information from the RDBMS
+   * @throws TException thrift transport error
+   * @throws UnknownDBException indicated database to search in does not exist.
    */
   List<String> getTables(String dbName, String tablePattern)
       throws MetaException, TException, UnknownDBException;

   /**
    * Get the names of all tables in the specified database that satisfy the supplied
+   * table name pattern.
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tablePattern pattern for table name to conform to
+   * @return List of table names.
+   * @throws MetaException error fetching information from the RDBMS
+   * @throws TException general thrift error
+   * @throws UnknownDBException indicated database to search in does not exist.
+   */
+  List<String> getTables(String catName, String dbName, String tablePattern)
+      throws MetaException, TException, UnknownDBException;
+
+
+  /**
+   * Get the names of all tables in the specified database that satisfy the supplied
    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
    * @param dbName Name of the database to fetch tables in.
    * @param tablePattern pattern to match for table names.
    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
    * @return List of table names.
- * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException error fetching information from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException indicated database does not exist. */ List getTables(String dbName, String tablePattern, TableType tableType) throws MetaException, TException, UnknownDBException; /** - * Get materialized views that have rewriting enabled. + * Get the names of all tables in the specified database that satisfy the supplied + * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW) + * @param catName catalog name. + * @param dbName Name of the database to fetch tables in. + * @param tablePattern pattern to match for table names. + * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views. + * @return List of table names. + * @throws MetaException error fetching information from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException indicated database does not exist. + */ + List getTables(String catName, String dbName, String tablePattern, TableType tableType) + throws MetaException, TException, UnknownDBException; + + /** + * Get materialized views that have rewriting enabled. This will use the default catalog. * @param dbName Name of the database to fetch materialized views from. * @return List of materialized view names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException no such database */ List getMaterializedViewsForRewriting(String dbName) throws MetaException, TException, UnknownDBException; /** - * For quick GetTablesOperation + * Get materialized views that have rewriting enabled. + * @param catName catalog name. + * @param dbName Name of the database to fetch materialized views from. + * @return List of materialized view names. + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException no such database + */ + List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, TException, UnknownDBException; + + /** + * Fetches just table name and comments. Useful when you need full table name + * (catalog.database.table) but don't need extra information like partition columns that + * require additional fetches from the database. + * @param dbPatterns database pattern to match, or null for all databases + * @param tablePatterns table pattern to match. + * @param tableTypes list of table types to fetch. + * @return list of TableMeta objects with information on matching tables + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. */ List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException, TException, UnknownDBException; /** + * Fetches just table name and comments. Useful when you need full table name + * (catalog.database.table) but don't need extra information like partition columns that + * require additional fetches from the database. + * @param catName catalog to search in. Search cannot cross catalogs. + * @param dbPatterns database pattern to match, or null for all databases + * @param tablePatterns table pattern to match. 
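+   *                      For example {@code "*"} matches every table, while a pattern such as
+   *                      {@code "web*|audit*"} matches tables starting with either prefix (an
+   *                      editor's illustration, not part of the original patch).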
+ * @param tableTypes list of table types to fetch. + * @return list of TableMeta objects with information on matching tables + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. + */ + List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) + throws MetaException, TException, UnknownDBException; + + /** * Get the names of all tables in the specified database. - * @param dbName + * @param dbName database name * @return List of table names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. */ List getAllTables(String dbName) throws MetaException, TException, UnknownDBException; /** + * Get the names of all tables in the specified database. + * @param catName catalog name + * @param dbName database name + * @return List of table names. + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. + */ + List getAllTables(String catName, String dbName) + throws MetaException, TException, UnknownDBException; + + /** * Get a list of table names that match a filter. * The filter operators are LIKE, <, <=, >, >=, =, <> * @@ -281,10 +426,55 @@ * @param maxTables * The maximum number of tables returned * @return A list of table names that match the desired filter + * @throws InvalidOperationException invalid filter + * @throws UnknownDBException no such database + * @throws TException thrift transport error */ List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException, TException, InvalidOperationException, UnknownDBException; + throws TException, InvalidOperationException, UnknownDBException; + /** + * Get a list of table names that match a filter. + * The filter operators are LIKE, <, <=, >, >=, =, <> + * + * In the filter statement, values interpreted as strings must be enclosed in quotes, + * while values interpreted as integers should not be. Strings and integers are the only + * supported value types. + * + * The currently supported key names in the filter are: + * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name + * and supports all filter operators + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times + * and supports all filter operators except LIKE + * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values + * and only supports the filter operators = and <>. + * Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement. + * For example, to filter on parameter keys called "retention", the key name in the filter + * statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention" + * Also, = and <> only work for keys that exist in the tables. + * E.g., filtering on tables where key1 <> value will only + * return tables that have a value for the parameter key1. 
+ * Some example filter statements include: + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " + + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0"; + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")" + * + * @param catName catalog name + * @param dbName + * The name of the database from which you will retrieve the table names + * @param filter + * The filter string + * @param maxTables + * The maximum number of tables returned + * @return A list of table names that match the desired filter + * @throws InvalidOperationException invalid filter + * @throws UnknownDBException no such database + * @throws TException thrift transport error + */ + List listTableNamesByFilter(String catName, String dbName, String filter, int maxTables) + throws TException, InvalidOperationException, UnknownDBException; /** * Drop the table. @@ -303,50 +493,109 @@ * The table wasn't found. * @throws TException * A thrift communication error occurred + * */ void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab) throws MetaException, TException, NoSuchObjectException; /** + * Drop the table. + * + * @param dbname + * The database for this table + * @param tableName + * The table to drop + * @param deleteData + * Should we delete the underlying data + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist * @param ifPurge * completely purge the table (skipping trash) while removing data from warehouse - * @see #dropTable(String, String, boolean, boolean) + * @throws MetaException + * Could not drop table properly. + * @throws NoSuchObjectException + * The table wasn't found. + * @throws TException + * A thrift communication error occurred */ - public void dropTable(String dbname, String tableName, boolean deleteData, + void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException, NoSuchObjectException; /** - * Drop the table in the DEFAULT database. + * Drop the table. * + * @param dbname + * The database for this table * @param tableName * The table to drop - * @param deleteData - * Should we delete the underlying data * @throws MetaException * Could not drop table properly. - * @throws UnknownTableException + * @throws NoSuchObjectException * The table wasn't found. * @throws TException * A thrift communication error occurred - * @throws NoSuchObjectException - * The table wasn't found. - * - * @deprecated As of release 0.6.0 replaced by {@link #dropTable(String, String, boolean, boolean)}. - * This method will be removed in release 0.7.0. - */ - @Deprecated - void dropTable(String tableName, boolean deleteData) - throws MetaException, UnknownTableException, TException, NoSuchObjectException; - - /** - * @see #dropTable(String, String, boolean, boolean) */ void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException; /** + * Drop a table. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param deleteData whether associated data should be deleted. + * @param ignoreUnknownTable whether a non-existent table name should be ignored + * @param ifPurge whether dropped data should be immediately removed rather than placed in HDFS + * trash. 
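+   * <p>For example (an editor's illustration, not part of the original patch; {@code msc} is
+   * an assumed {@code IMetaStoreClient}):
+   * <pre>{@code
+   *   // Drop web.logs from the hypothetical catalog "test_cat", deleting its data,
+   *   // ignoring a missing table, and skipping the trash:
+   *   msc.dropTable("test_cat", "web", "logs", true, true, true);
+   * }</pre>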
+   * @throws MetaException something went wrong, usually in the RDBMS or storage.
+   * @throws NoSuchObjectException No table of this name exists, only thrown if
+   * ignoreUnknownTable is false.
+   * @throws TException general thrift error.
+   */
+  void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                 boolean ignoreUnknownTable, boolean ifPurge)
+      throws MetaException, NoSuchObjectException, TException;
+
+  /**
+   * Drop a table. Equivalent to
+   * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with ifPurge set to
+   * false.
+   * @param catName catalog the table is in.
+   * @param dbName database the table is in.
+   * @param tableName table name.
+   * @param deleteData whether associated data should be deleted.
+   * @param ignoreUnknownTable whether a non-existent table name should be ignored
+   * @throws MetaException something went wrong, usually in the RDBMS or storage.
+   * @throws NoSuchObjectException No table of this name exists, only thrown if
+   * ignoreUnknownTable is false.
+   * @throws TException general thrift error.
+   */
+  default void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                         boolean ignoreUnknownTable)
+      throws MetaException, NoSuchObjectException, TException {
+    dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, false);
+  }
+
+  /**
+   * Drop a table. Equivalent to
+   * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with deleteData
+   * and ignoreUnknownTable set to true and ifPurge set to false.
+   * @param catName catalog the table is in.
+   * @param dbName database the table is in.
+   * @param tableName table name.
+   * @throws MetaException something went wrong, usually in the RDBMS or storage.
+   * @throws NoSuchObjectException No table of this name exists, only thrown if
+   * ignoreUnknownTable is false.
+   * @throws TException general thrift error.
+   */
+  default void dropTable(String catName, String dbName, String tableName)
+      throws MetaException, NoSuchObjectException, TException {
+    dropTable(catName, dbName, tableName, true, true, false);
+  }
+
   /**
    * Truncate the table/partitions in the DEFAULT database.
    * @param dbName
    * The db to which the table to be truncate belongs to
@@ -354,13 +603,27 @@ void dropTable(String dbname, String tableName)
    * The table to truncate
    * @param partNames
    * List of partitions to truncate. NULL will truncate the whole table/all partitions
-   * @throws MetaException
-   * @throws TException
-   * Could not truncate table properly.
+   * @throws MetaException Failure in the RDBMS or storage
+   * @throws TException Thrift transport exception
    */
   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;

   /**
+   * Truncate the table/partitions in the given catalog.
+   * @param catName catalog name
+   * @param dbName
+   * The db to which the table to be truncated belongs
+   * @param tableName
+   * The table to truncate
+   * @param partNames
+   * List of partitions to truncate. NULL will truncate the whole table/all partitions
+   * @throws MetaException Failure in the RDBMS or storage
+   * @throws TException Thrift transport exception
+   */
+  void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+      throws MetaException, TException;
+
   /**
    * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it.
   *
   * @param request Inputs for path of the data files to be recycled to cmroot and
   * isPurge flag when set to true files which needs to be recycled are not moved to Trash
@@ -369,43 +632,33 @@ void dropTable(String dbname, String tableName)
    */
   CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException;

-  boolean tableExists(String databaseName, String tableName) throws MetaException,
-      TException, UnknownDBException;
-
   /**
-   * Check to see if the specified table exists in the DEFAULT database.
-   * @param tableName
-   * @return TRUE if DEFAULT.tableName exists, FALSE otherwise.
-   * @throws MetaException
-   * @throws TException
-   * @throws UnknownDBException
-   * @deprecated As of release 0.6.0 replaced by {@link #tableExists(String, String)}.
-   * This method will be removed in release 0.7.0.
+   * Check whether a table exists in the default catalog.
+   * @param databaseName database name
+   * @param tableName table name
+   * @return true if the indicated table exists, false if not
+   * @throws MetaException error fetching from the RDBMS
+   * @throws TException thrift transport error
+   * @throws UnknownDBException the indicated database does not exist.
    */
-  @Deprecated
-  boolean tableExists(String tableName) throws MetaException,
-      TException, UnknownDBException;
+  boolean tableExists(String databaseName, String tableName)
      throws MetaException, TException, UnknownDBException;

   /**
-   * Get a table object from the DEFAULT database.
-   *
-   * @param tableName
-   * Name of the table to fetch.
-   * @return An object representing the table.
-   * @throws MetaException
-   * Could not fetch the table
-   * @throws TException
-   * A thrift communication error occurred
-   * @throws NoSuchObjectException
-   * In case the table wasn't found.
-   * @deprecated As of release 0.6.0 replaced by {@link #getTable(String, String)}.
-   * This method will be removed in release 0.7.0.
+   * Check whether a table exists.
+   * @param catName catalog name
+   * @param dbName database name
+   * @param tableName table name
+   * @return true if the indicated table exists, false if not
+   * @throws MetaException error fetching from the RDBMS
+   * @throws TException thrift transport error
+   * @throws UnknownDBException the indicated database does not exist.
    */
-  @Deprecated
-  Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException;
+  boolean tableExists(String catName, String dbName, String tableName)
+      throws MetaException, TException, UnknownDBException;

   /**
-   * Get a Database Object
+   * Get a Database Object in the default catalog
    * @param databaseName name of the database to fetch
    * @return the database
    * @throws NoSuchObjectException The database does not exist
@@ -415,9 +668,21 @@ boolean tableExists(String tableName) throws MetaException,
   Database getDatabase(String databaseName)
       throws NoSuchObjectException, MetaException, TException;

+  /**
+   * Get a database.
+   * @param catalogName catalog name. Can be null, in which case
+   *                    {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+   * @param databaseName database name
+   * @return the database object
+   * @throws NoSuchObjectException No database with this name exists in the specified catalog
+   * @throws MetaException something went wrong, usually in the RDBMS
+   * @throws TException general thrift error
+   */
+  Database getDatabase(String catalogName, String databaseName)
+      throws NoSuchObjectException, MetaException, TException;

   /**
-   * Get a table object.
+   * Get a table object in the default catalog.
    *
    * @param dbName
    * The database the table is located in.
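
With catalogs in place, clients can address objects either with the new catalog-qualified calls or through the legacy default-catalog overloads shown above. A compilable sketch of both styles (an editor's example, not part of the patch; the catalog, database, and table names are hypothetical):

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

class CatalogLookupExample {
  static void lookups(IMetaStoreClient msc) throws TException {
    // Catalog-qualified addressing added by this change:
    Database db = msc.getDatabase("test_cat", "web");
    Table logs = msc.getTable("test_cat", "web", "logs");
    boolean present = msc.tableExists("test_cat", "web", "logs");

    // Legacy two-part addressing still works and resolves to the default catalog:
    Table viaDefault = msc.getTable("web", "logs");
  }
}
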
@@ -435,7 +700,19 @@ Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; /** - * + * Get a table object. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @return table object. + * @throws MetaException Something went wrong, usually in the RDBMS. + * @throws TException general thrift error. + */ + Table getTable(String catName, String dbName, String tableName) throws MetaException, TException; + + /** + * Get tables as objects (rather than just fetching their names). This is more expensive and + * should only be used if you actually need all the information about the tables. * @param dbName * The database the tables are located in. * @param tableNames @@ -457,6 +734,30 @@ Table getTable(String dbName, String tableName) throws MetaException, throws MetaException, InvalidOperationException, UnknownDBException, TException; /** + * Get tables as objects (rather than just fetching their names). This is more expensive and + * should only be used if you actually need all the information about the tables. + * @param catName catalog name + * @param dbName + * The database the tables are located in. + * @param tableNames + * The names of the tables to fetch + * @return A list of objects representing the tables. + * Only the tables that can be retrieved from the database are returned. For example, + * if none of the requested tables could be retrieved, an empty list is returned. + * There is no guarantee of ordering of the returned tables. + * @throws InvalidOperationException + * The input to this operation is invalid (e.g., the list of tables names is null) + * @throws UnknownDBException + * The requested database could not be fetched. + * @throws TException + * A thrift communication error occurred + * @throws MetaException + * Any other errors + */ + List
getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+      throws MetaException, InvalidOperationException, UnknownDBException, TException;
+
   /**
    * Returns the invalidation information for the materialized views given as input.
    */
   Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
@@ -469,22 +770,72 @@ void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm
       throws MetaException, TException;

   /**
-   * @param tableName
-   * @param dbName
-   * @param partVals
+   * Updates the creation metadata for the materialized view.
+   */
+  void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm)
+      throws MetaException, TException;
+
+  /**
+   * Add a partition to a table and get back the resulting Partition object. This creates an
+   * empty default partition with just the partition values set.
+   * @param dbName database name
+   * @param tableName table name
+   * @param partVals partition values
    * @return the partition object
-   * @throws InvalidObjectException
-   * @throws AlreadyExistsException
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
-   * java.lang.String, java.util.List)
+   * @throws InvalidObjectException no such table
+   * @throws AlreadyExistsException a partition with these values already exists
+   * @throws MetaException error accessing the RDBMS
+   * @throws TException thrift transport error
    */
-  Partition appendPartition(String tableName, String dbName,
-      List<String> partVals) throws InvalidObjectException,
-      AlreadyExistsException, MetaException, TException;
+  Partition appendPartition(String dbName, String tableName, List<String> partVals)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;

-  Partition appendPartition(String tableName, String dbName, String name)
+  /**
+   * Add a partition to a table and get back the resulting Partition object. This creates an
+   * empty default partition with just the partition values set.
+   * @param catName catalog name
+   * @param dbName database name
+   * @param tableName table name
+   * @param partVals partition values
+   * @return the partition object
+   * @throws InvalidObjectException no such table
+   * @throws AlreadyExistsException a partition with these values already exists
+   * @throws MetaException error accessing the RDBMS
+   * @throws TException thrift transport error
+   */
+  Partition appendPartition(String catName, String dbName, String tableName, List<String> partVals)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+
+  /**
+   * Add a partition to a table and get back the resulting Partition object. This creates an
+   * empty default partition with just the partition value set.
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param name name of the partition, should be in the form partkey=partval.
+   * @return new partition object.
+   * @throws InvalidObjectException No such table.
+   * @throws AlreadyExistsException Partition of this name already exists.
+   * @throws MetaException error accessing the RDBMS
+   * @throws TException thrift transport error
+   */
+  Partition appendPartition(String dbName, String tableName, String name)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+
+  /**
+   * Add a partition to a table and get back the resulting Partition object. This creates an
+   * empty default partition with just the partition value set.
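+   * <p>For example (editor's illustration, not part of the original patch):
+   * <pre>{@code
+   *   // Creates partition ds=2018-03-26 of table web.logs in the hypothetical
+   *   // catalog "test_cat":
+   *   Partition p = msc.appendPartition("test_cat", "web", "logs", "ds=2018-03-26");
+   * }</pre>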
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param name name of the partition, should be in the form partkey=partval.
+   * @return new partition object.
+   * @throws InvalidObjectException No such table.
+   * @throws AlreadyExistsException Partition of this name already exists.
+   * @throws MetaException error accessing the RDBMS
+   * @throws TException thrift transport error
+   */
+  Partition appendPartition(String catName, String dbName, String tableName, String name)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;

   /**
@@ -522,6 +873,15 @@ Partition add_partition(Partition partition)
   int add_partitions(List<Partition> partitions)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;

+  /**
+   * Add partitions using a spec proxy.
+   * @param partitionSpec partition spec proxy
+   * @return number of partitions that were added
+   * @throws InvalidObjectException the partitionSpec is malformed.
+   * @throws AlreadyExistsException one or more of the partitions already exist.
+   * @throws MetaException error accessing the RDBMS or storage.
+   * @throws TException thrift transport error
+   */
   int add_partitions_pspec(PartitionSpecProxy partitionSpec)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;

@@ -538,25 +898,46 @@ int add_partitions_pspec(PartitionSpecProxy partitionSpec)
       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;

   /**
-   * @param dbName
-   * @param tblName
-   * @param partVals
+   * Get a partition.
+   * @param dbName database name
+   * @param tblName table name
+   * @param partVals partition values for this partition, must be in the same order as the
+   *                 partition keys of the table.
    * @return the partition object
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
-   * java.lang.String, java.util.List)
+   * @throws NoSuchObjectException no such partition
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error
+   */
+  Partition getPartition(String dbName, String tblName, List<String> partVals)
+      throws NoSuchObjectException, MetaException, TException;
+
+  /**
+   * Get a partition.
+   * @param catName catalog name
+   * @param dbName database name
+   * @param tblName table name
+   * @param partVals partition values for this partition, must be in the same order as the
+   *                 partition keys of the table.
+   * @return the partition object
+   * @throws NoSuchObjectException no such partition
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error
    */
-  Partition getPartition(String dbName, String tblName,
-      List<String> partVals) throws NoSuchObjectException, MetaException, TException;
+  Partition getPartition(String catName, String dbName, String tblName, List<String> partVals)
+      throws NoSuchObjectException, MetaException, TException;

   /**
-   * @param partitionSpecs
-   * @param sourceDb
-   * @param sourceTable
-   * @param destdb
-   * @param destTableName
+   * Move a partition from one table to another
+   * @param partitionSpecs key value pairs that describe the partition to be moved.
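+   *                       For example, a map of {@code {"year" -> "2015", "month" -> "03"}}
+   *                       identifies the partition (year=2015, month=03) to move (an editor's
+   *                       illustration, not part of the original patch).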
+   * @param sourceDb database of the source table
+   * @param sourceTable name of the source table
+   * @param destdb database of the destination table
+   * @param destTableName name of the destination table
    * @return partition object
+   * @throws MetaException error accessing the RDBMS or storage
+   * @throws NoSuchObjectException no such table, for either source or destination table
+   * @throws InvalidObjectException error in partition specifications
+   * @throws TException thrift transport error
    */
   Partition exchange_partition(Map<String, String> partitionSpecs,
       String sourceDb, String sourceTable, String destdb,
@@ -564,14 +945,38 @@ Partition exchange_partition(Map<String, String> partitionSpecs,
       InvalidObjectException, TException;

   /**
+   * Move a partition from one table to another
+   * @param partitionSpecs key value pairs that describe the partition to be moved.
+   * @param sourceCat catalog of the source table
+   * @param sourceDb database of the source table
+   * @param sourceTable name of the source table
+   * @param destCat catalog of the destination table, for now must be the same as sourceCat
+   * @param destdb database of the destination table
+   * @param destTableName name of the destination table
+   * @return partition object
+   * @throws MetaException error accessing the RDBMS or storage
+   * @throws NoSuchObjectException no such table, for either source or destination table
+   * @throws InvalidObjectException error in partition specifications
+   * @throws TException thrift transport error
+   */
+  Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+      String sourceDb, String sourceTable, String destCat, String destdb,
+      String destTableName) throws MetaException, NoSuchObjectException,
+      InvalidObjectException, TException;
+
+  /**
    * With the one partitionSpecs to exchange, multiple partitions could be exchanged.
    * e.g., year=2015/month/day, exchanging partition year=2015 results in all the partitions
    * belonging to it being exchanged. This function returns the list of affected partitions.
-   * @param partitionSpecs
-   * @param sourceDb
-   * @param sourceTable
-   * @param destdb
-   * @param destTableName
+   * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+   * @param sourceCat catalog of the source table
+   * @param sourceDb database of the source table
+   * @param sourceTable name of the source table
+   * @param destCat catalog of the destination table, for now must be the same as sourceCat
+   * @param destdb database of the destination table
+   * @param destTableName name of the destination table
+   * @throws MetaException error accessing the RDBMS or storage
+   * @throws NoSuchObjectException no such table, for either source or destination table
+   * @throws InvalidObjectException error in partition specifications
+   * @throws TException thrift transport error
+   * @return the list of the new partitions
+   */
+  List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+      String sourceDb, String sourceTable, String destCat,
+      String destdb, String destTableName)
+      throws MetaException, NoSuchObjectException, InvalidObjectException, TException;
+
+  /**
+   * Get a Partition by name.
+   * @param dbName database name.
+   * @param tblName table name.
    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
    * @return the partition object
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
-   * java.lang.String, java.util.List)
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error
+   */
+  Partition getPartition(String dbName, String tblName, String name)
+      throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+
+  /**
+   * Get a Partition by name.
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tblName table name.
+   * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+   * @return the partition object
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error
    */
-  Partition getPartition(String dbName, String tblName,
-      String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+  Partition getPartition(String catName, String dbName, String tblName, String name)
+      throws MetaException, UnknownTableException, NoSuchObjectException, TException;

   /**
-   * @param dbName
-   * @param tableName
-   * @param pvals
-   * @param userName
-   * @param groupNames
+   * Get a Partition along with authorization information.
+   * @param dbName database name
+   * @param tableName table name
+   * @param pvals partition values, must be in the same order as the table's partition keys
+   * @param userName name of the calling user
+   * @param groupNames groups the calling user belongs to
    * @return the partition
-   * @throws MetaException
-   * @throws UnknownTableException
-   * @throws NoSuchObjectException
-   * @throws TException
+   * @throws MetaException error accessing the RDBMS
+   * @throws UnknownTableException no such table
+   * @throws NoSuchObjectException no such partition
+   * @throws TException thrift transport error
    */
   Partition getPartitionWithAuthInfo(String dbName, String tableName,
       List<String> pvals, String userName, List<String> groupNames)
       throws MetaException, UnknownTableException, NoSuchObjectException, TException;

   /**
-   * @param tbl_name
-   * @param db_name
-   * @param max_parts
+   * Get a Partition along with authorization information.
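+   * <p>Illustrative call (editor's sketch, not part of the original patch; names are
+   * hypothetical):
+   * <pre>{@code
+   *   Partition p = msc.getPartitionWithAuthInfo("test_cat", "web", "logs",
+   *       Arrays.asList("2015", "03"), "hive_user", Arrays.asList("hive_users"));
+   * }</pre>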
+   * @param catName catalog name
+   * @param dbName database name
+   * @param tableName table name
+   * @param pvals partition values, must be in the same order as the table's partition keys
+   * @param userName name of the calling user
+   * @param groupNames groups the calling user belongs to
+   * @return the partition
+   * @throws MetaException error accessing the RDBMS
+   * @throws UnknownTableException no such table
+   * @throws NoSuchObjectException no such partition
+   * @throws TException thrift transport error
+   */
+  Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+      List<String> pvals, String userName, List<String> groupNames)
+      throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+
+  /**
+   * Get a list of partitions for a table.
+   * @param db_name database name
+   * @param tbl_name table name
+   * @param max_parts maximum number of parts to return, -1 for all
    * @return the list of partitions
-   * @throws NoSuchObjectException
-   * @throws MetaException
-   * @throws TException
+   * @throws NoSuchObjectException No such table.
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
    */
-  List<Partition> listPartitions(String db_name, String tbl_name,
-      short max_parts) throws NoSuchObjectException, MetaException, TException;
+  List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+      throws NoSuchObjectException, MetaException, TException;
+
+  /**
+   * Get a list of partitions for a table.
+   * @param catName catalog name
+   * @param db_name database name
+   * @param tbl_name table name
+   * @param max_parts maximum number of parts to return, -1 for all
+   * @return the list of partitions
+   * @throws NoSuchObjectException No such table.
+   * @throws MetaException error accessing RDBMS.
+   * @throws TException thrift transport error
+   */
+  List<Partition> listPartitions(String catName, String db_name, String tbl_name, int max_parts)
+      throws NoSuchObjectException, MetaException, TException;

-  public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
+  /**
+   * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param maxParts maximum number of partitions to return, or -1 for all
+   * @return a PartitionSpecProxy
+   * @throws TException thrift transport error
+   */
+  PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
     throws TException;
+
+  /**
+   * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param maxParts maximum number of partitions to return, or -1 for all
+   * @return a PartitionSpecProxy
+   * @throws TException thrift transport error
+   */
+  PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+      int maxParts) throws TException;
+
+  /**
+   * Get a list of partitions based on a (possibly partial) list of partition values.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param part_vals partition values, in order of the table partition keys. These can be
+   *                  partial, or .* to match all values for a particular key.
+   * @param max_parts maximum number of partitions to return, or -1 for all.
+   * @return list of partitions
+   * @throws NoSuchObjectException no such table.
+   * @throws MetaException error accessing the database or processing the partition values.
+   * @throws TException thrift transport error.
+   */
   List<Partition> listPartitions(String db_name, String tbl_name,
       List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;

+  /**
+   * Get a list of partitions based on a (possibly partial) list of partition values.
+   * @param catName catalog name.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param part_vals partition values, in order of the table partition keys. These can be
+   *                  partial, or .* to match all values for a particular key.
+   * @param max_parts maximum number of partitions to return, or -1 for all.
+   * @return list of partitions
+   * @throws NoSuchObjectException no such table.
+   * @throws MetaException error accessing the database or processing the partition values.
+   * @throws TException thrift transport error.
+   */
+  List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+      List<String> part_vals, int max_parts)
+      throws NoSuchObjectException, MetaException, TException;
+
+  /**
+   * List names of partitions in a table.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+   * @return list of partition names.
+   * @throws NoSuchObjectException No such table.
+   * @throws MetaException Error accessing the RDBMS.
+   * @throws TException thrift transport error
+   */
   List<String> listPartitionNames(String db_name, String tbl_name,
       short max_parts) throws NoSuchObjectException, MetaException, TException;

+  /**
+   * List names of partitions in a table.
+   * @param catName catalog name.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+   * @return list of partition names.
+   * @throws NoSuchObjectException No such table.
+   * @throws MetaException Error accessing the RDBMS.
+   * @throws TException thrift transport error
+   */
+  List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+      int max_parts) throws NoSuchObjectException, MetaException, TException;
+
+  /**
+   * Get a list of partition names matching a partial specification of the partition values.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param part_vals partial list of partition values. These must be given in the order of the
+   *                  partition keys. If you wish to accept any value for a particular key you
+   *                  can pass ".*" for that value in this list.
+   * @param max_parts maximum number of partition names to return, or -1 to return all that are
+   *                  found.
+   * @return list of matching partition names.
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error.
+   * @throws NoSuchObjectException no such table.
+   */
   List<String> listPartitionNames(String db_name, String tbl_name,
       List<String> part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException;

-  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+  /**
+   * Get a list of partition names matching a partial specification of the partition values.
+   * @param catName catalog name.
+   * @param db_name database name.
+   * @param tbl_name table name.
+   * @param part_vals partial list of partition values. These must be given in the order of the
+   *                  partition keys. If you wish to accept any value for a particular key you
+   *                  can pass ".*" for that value in this list.
+   * @param max_parts maximum number of partition names to return, or -1 to return all that are
+   *                  found.
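+   *                  For example, with partition keys (year, month), part_vals of
+   *                  ["2015", ".*"] selects the names of every month partition of year 2015
+   *                  (an editor's illustration, not part of the original patch).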
+   * @return list of matching partition names.
+   * @throws MetaException error accessing the RDBMS.
+   * @throws TException thrift transport error.
+   * @throws NoSuchObjectException no such table.
+   */
+  List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+      List<String> part_vals, int max_parts)
+      throws MetaException, TException, NoSuchObjectException;
+
+  /**
+   * Get a list of partition values
+   * @param request request
+   * @return response
+   * @throws MetaException error accessing RDBMS
+   * @throws TException thrift transport error
+   * @throws NoSuchObjectException no such table
+   */
+  PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
     throws MetaException, TException, NoSuchObjectException;

   /**
@@ -644,15 +1232,31 @@ public PartitionValuesResponse listPartitionValues(PartitionValuesRequest reques
    * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
    * be done only on string partition keys.
    * @return number of partitions
-   * @throws MetaException
-   * @throws NoSuchObjectException
-   * @throws TException
+   * @throws MetaException error accessing RDBMS or processing the filter
+   * @throws NoSuchObjectException no such table
+   * @throws TException thrift transport error
+   */
+  int getNumPartitionsByFilter(String dbName, String tableName,
+      String filter) throws MetaException, NoSuchObjectException, TException;
+
+  /**
+   * Get number of partitions matching specified filter
+   * @param catName catalog name
+   * @param dbName the database name
+   * @param tableName the table name
+   * @param filter the filter string,
+   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    be done only on string partition keys.
+   * @return number of partitions
+   * @throws MetaException error accessing RDBMS or processing the filter
+   * @throws NoSuchObjectException no such table
+   * @throws TException thrift transport error
    */
-  public int getNumPartitionsByFilter(String dbName, String tableName,
-      String filter) throws MetaException, NoSuchObjectException, TException;
+  int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+      String filter) throws MetaException, NoSuchObjectException, TException;

-  /**
+  /**
    * Get list of partitions matching specified filter
    * @param db_name the database name
    * @param tbl_name the table name
@@ -662,17 +1266,64 @@ public int getNumPartitionsByFilter(String dbName, String tableName,
    * @param max_parts the maximum number of partitions to return,
    * all partitions are returned if -1 is passed
    * @return list of partitions
-   * @throws MetaException
-   * @throws NoSuchObjectException
-   * @throws TException
+   * @throws MetaException Error accessing the RDBMS or processing the filter.
+   * @throws NoSuchObjectException No such table.
+   * @throws TException thrift transport error
    */
   List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
-      String filter, short max_parts) throws MetaException,
-      NoSuchObjectException, TException;
+      String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+
+  /**
+   * Get list of partitions matching specified filter
+   * @param catName catalog name.
+   * @param db_name the database name
+   * @param tbl_name the table name
+   * @param filter the filter string,
+   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    be done only on string partition keys.
+ * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException No such table. + * @throws TException thrift transport error + */ + List listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; + /** + * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to + * fetch. + * @param db_name database name + * @param tbl_name table name + * @param filter SQL where clause filter + * @param max_parts maximum number of partitions to fetch, or -1 for all + * @return PartitionSpec + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException No table matches the request + * @throws TException thrift transport error + */ PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, - String filter, int max_parts) throws MetaException, - NoSuchObjectException, TException; + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; + + /** + * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to + * fetch. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param filter SQL where clause filter + * @param max_parts maximum number of partitions to fetch, or -1 for all + * @return PartitionSpec + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException No table matches the request + * @throws TException thrift transport error + */ + PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; /** * Get list of partitions matching specified serialized expression @@ -685,22 +1336,61 @@ PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, * metastore server-side configuration is used. * @param result the resulting list of partitions * @return whether the resulting list contains partitions which may or may not match the expr + * @throws TException thrift transport error or error executing the filter. */ boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, String default_partition_name, short max_parts, List result) throws TException; /** - * @param dbName - * @param tableName - * @param s - * @param userName - * @param groupNames + * Get list of partitions matching specified serialized expression + * @param catName catalog name + * @param db_name the database name + * @param tbl_name the table name + * @param expr expression, serialized from ExprNodeDesc + * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @param default_partition_name Default partition name from configuration. If blank, the + * metastore server-side configuration is used. + * @param result the resulting list of partitions + * @return whether the resulting list contains partitions which may or may not match the expr + * @throws TException thrift transport error or error executing the filter. 
+ */ + boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, List result) + throws TException; + + /** + * List partitions, fetching the authorization information along with the partitions. + * @param dbName database name + * @param tableName table name + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privileges for + * @param groupNames groups to fetch privileges for * @return the list of partitions - * @throws NoSuchObjectException + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List listPartitionsWithAuthInfo(String dbName, - String tableName, short s, String userName, List groupNames) + String tableName, short maxParts, String userName, List groupNames) + throws MetaException, TException, NoSuchObjectException; + + /** + * List partitions, fetching the authorization information along with the partitions. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privileges for + * @param groupNames groups to fetch privileges for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, List groupNames) throws MetaException, TException, NoSuchObjectException; /** @@ -709,62 +1399,138 @@ boolean listPartitionsByExpr(String db_name, String tbl_name, * @param tbl_name table name * @param part_names list of partition names * @return list of Partition objects - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error */ List getPartitionsByNames(String db_name, String tbl_name, List part_names) throws NoSuchObjectException, MetaException, TException; /** - * @param dbName - * @param tableName - * @param partialPvals - * @param s - * @param userName - * @param groupNames - * @return the list of paritions - * @throws NoSuchObjectException + * Get partitions by a list of partition names. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param part_names list of partition names + * @return list of Partition objects + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. 
/** @@ -709,62 +1399,138 @@ boolean listPartitionsByExpr(String db_name, String tbl_name, * @param tbl_name table name * @param part_names list of partition names * @return list of Partition objects - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error */ List<Partition> getPartitionsByNames(String db_name, String tbl_name, List<String> part_names) throws NoSuchObjectException, MetaException, TException; /** - * @param dbName - * @param tableName - * @param partialPvals - * @param s - * @param userName - * @param groupNames - * @return the list of paritions - * @throws NoSuchObjectException + * Get partitions by a list of partition names. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param part_names list of partition names + * @return list of Partition objects + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error + */ + List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name, + List<String> part_names) + throws NoSuchObjectException, MetaException, TException; + + /** + * List partitions along with privilege information for a user or groups + * @param dbName database name + * @param tableName table name + * @param partialPvals partition values, can be partial + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privilege information for + * @param groupNames groups to fetch privilege information for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List<Partition> listPartitionsWithAuthInfo(String dbName, - String tableName, List<String> partialPvals, short s, String userName, + String tableName, List<String> partialPvals, short maxParts, String userName, List<String> groupNames) throws MetaException, TException, NoSuchObjectException; /** - * @param db_name - * @param tbl_name - * @param partKVs - * @param eventType - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - * @throws UnknownTableException - * @throws UnknownDBException - * @throws UnknownPartitionException - * @throws InvalidPartitionException + * List partitions along with privilege information for a user or groups + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partialPvals partition values, can be partial + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privilege information for + * @param groupNames groups to fetch privilege information for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List<String> partialPvals, int maxParts, String userName, + List<String> groupNames) + throws MetaException, TException, NoSuchObjectException;
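A short, hypothetical sketch of the catalog-aware getPartitionsByNames declared above; all names are illustrative only:

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;

  class GetByNamesExample {
    // Fetches two specific partitions by their name strings from a named catalog.
    static List<Partition> fetch(IMetaStoreClient client) throws Exception {
      // Partition names take the form "key1=value1[/key2=value2...]".
      return client.getPartitionsByNames("hive", "sales_db", "orders",
          Arrays.asList("ds=2018-03-01", "ds=2018-03-02"));
    }
  }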
+ + /** + * Mark an event as having occurred on a partition. + * @param db_name database name + * @param tbl_name table name + * @param partKVs key value pairs that describe the partition + * @param eventType type of the event + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException declared, but does not appear to be thrown in practice + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException the partition described by partKVs is invalid */ void markPartitionForEvent(String db_name, String tbl_name, Map<String, String> partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** - * @param db_name - * @param tbl_name - * @param partKVs - * @param eventType - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - * @throws UnknownTableException - * @throws UnknownDBException - * @throws UnknownPartitionException - * @throws InvalidPartitionException + * Mark an event as having occurred on a partition. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param partKVs key value pairs that describe the partition + * @param eventType type of the event + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException declared, but does not appear to be thrown in practice + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException the partition described by partKVs is invalid + */ + void markPartitionForEvent(String catName, String db_name, String tbl_name, Map<String, String> partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + + /** + * Determine whether a partition has been marked with a particular event type. + * @param db_name database name + * @param tbl_name table name. + * @param partKVs key value pairs that describe the partition. + * @param eventType event type + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException declared, but does not appear to be thrown in practice + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException the partition described by partKVs is invalid */ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String, String> partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** + * Determine whether a partition has been marked with a particular event type. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name. + * @param partKVs key value pairs that describe the partition. + * @param eventType event type + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException declared, but does not appear to be thrown in practice + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException the partition described by partKVs is invalid + */ + boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map<String, String> partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
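The mark/check pair above is easiest to see as a round trip. A minimal sketch, assuming the same illustrative catalog and table names as before:

  import java.util.Collections;
  import java.util.Map;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.PartitionEventType;

  class PartitionEventExample {
    // Marks a LOAD_DONE event on one partition, then verifies the mark took effect.
    static boolean markAndCheck(IMetaStoreClient client) throws Exception {
      Map<String, String> partKVs = Collections.singletonMap("ds", "2018-03-01");
      client.markPartitionForEvent("hive", "sales_db", "orders", partKVs,
          PartitionEventType.LOAD_DONE);
      return client.isPartitionMarkedForEvent("hive", "sales_db", "orders", partKVs,
          PartitionEventType.LOAD_DONE);
    }
  }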
+ + /** + * @param partVals + * @throws TException + * @throws MetaException @@ -784,92 +1550,449 @@ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Drop a partition. + * @param catName catalog name. + * @param db_name database name + * @param tbl_name table name + * @param part_vals partition values, in the same order as the partition keys + * @param deleteData + * delete the underlying data or just delete the partition in metadata + * @return true if the partition was dropped + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage. + * @throws TException thrift transport error + */ + boolean dropPartition(String catName, String db_name, String tbl_name, + List<String> part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Drop a partition with the option to purge the partition data directly, + * rather than to move data to trash. + * @param db_name Name of the database. + * @param tbl_name Name of the table. + * @param part_vals Specification of the partitions being dropped. + * @param options PartitionDropOptions for the operation. + * @return True (if partitions are dropped), else false. + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage. + * @throws TException thrift transport error. */ - boolean dropPartition(String db_name, String tbl_name, - List<String> part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; /** - * Method to dropPartitions() with the option to purge the partition data directly, + * Drop a partition with the option to purge the partition data directly, * rather than to move data to trash. + * @param catName catalog name. * @param db_name Name of the database. * @param tbl_name Name of the table. * @param part_vals Specification of the partitions being dropped. * @param options PartitionDropOptions for the operation. * @return True (if partitions are dropped), else false. - * @throws TException + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage. + * @throws TException thrift transport error. */ - boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, - PartitionDropOptions options) throws TException; + boolean dropPartition(String catName, String db_name, String tbl_name, List<String> part_vals, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + /** + * Drop partitions based on an expression. + * @param dbName database name. + * @param tblName table name. + * @param partExprs pairs of (archive level, serialized partition expression). The second + * half of each pair is a serialized expression used to select the partitions to + * drop; the first half relates to the archive level of archived partitions. The + * behavior when multiple expressions are passed is not clearly documented. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + */ List<Partition> dropPartitions(String dbName, String tblName, List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException;
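For the options-based drop, a hypothetical caller might look like this; PartitionDropOptions is the builder named in these signatures, and purgeData is assumed to be one of its flags:

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.PartitionDropOptions;

  class DropWithOptionsExample {
    // Drops one partition by value, purging the data rather than moving it to trash.
    static boolean drop(IMetaStoreClient client) throws Exception {
      return client.dropPartition("hive", "sales_db", "orders",
          Arrays.asList("2018-03-01"),      // values, in partition-key order
          PartitionDropOptions.instance().deleteData(true).purgeData(true));
    }
  }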
+ /** + * Drop partitions based on an expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partExprs pairs of (archive level, serialized partition expression). The second + * half of each pair is a serialized expression used to select the partitions to + * drop; the first half relates to the archive level of archived partitions. The + * behavior when multiple expressions are passed is not clearly documented. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + */ + default List<Partition> dropPartitions(String catName, String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, + boolean deleteData, boolean ifExists) + throws NoSuchObjectException, MetaException, TException { + return dropPartitions(catName, dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists)); + } + + /** + * Drop partitions based on an expression. + * @param dbName database name. + * @param tblName table name. + * @param partExprs pairs of (archive level, serialized partition expression). The second + * half of each pair is a serialized expression used to select the partitions to + * drop; the first half relates to the archive level of archived partitions. The + * behavior when multiple expressions are passed is not clearly documented. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @param needResults if true, the list of deleted partitions will be returned, if not, null + * will be returned. + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + * @deprecated Use {@link #dropPartitions(String, String, String, List, boolean, boolean, boolean)} + */ List<Partition> dropPartitions(String dbName, String tblName, List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException; /** + * Drop partitions based on an expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partExprs pairs of (archive level, serialized partition expression). The second + * half of each pair is a serialized expression used to select the partitions to + * drop; the first half relates to the archive level of archived partitions. The + * behavior when multiple expressions are passed is not clearly documented. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @param needResults if true, the list of deleted partitions will be returned, if not, null + * will be returned. + * @return list of deleted partitions, if needResults is true + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + */ + default List<Partition> dropPartitions(String catName, String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, + boolean ifExists, boolean needResults) + throws NoSuchObjectException, MetaException, TException { + return dropPartitions(catName, dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists) + .returnResults(needResults)); + }
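Since the default methods above only repackage their boolean flags into a PartitionDropOptions, the two calls in this sketch are equivalent; the ObjectPair import path and all names are assumptions made for illustration:

  import java.util.List;
  import org.apache.hadoop.hive.common.ObjectPair;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.PartitionDropOptions;
  import org.apache.hadoop.hive.metastore.api.Partition;

  class DropByExprExample {
    static void dropBothWays(IMetaStoreClient client,
        List<ObjectPair<Integer, byte[]>> partExprs) throws Exception {
      // Flag form, resolved by the default method:
      List<Partition> viaFlags = client.dropPartitions(
          "hive", "sales_db", "orders", partExprs, true, false, true);
      // The explicit-options form it delegates to
      // (in practice you would issue only one of these calls):
      List<Partition> viaOptions = client.dropPartitions(
          "hive", "sales_db", "orders", partExprs,
          PartitionDropOptions.instance().deleteData(true).ifExists(false).returnResults(true));
      System.out.println(viaFlags.size() + " / " + viaOptions.size());
    }
  }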
+ + /** * Generalization of dropPartitions(), * @param dbName Name of the database * @param tblName Name of the table * @param partExprs Partition-specification * @param options Boolean options for dropping partitions * @return List of Partitions dropped + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. * @throws TException On failure */ List<Partition> dropPartitions(String dbName, String tblName, - List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options) throws TException; + List<ObjectPair<Integer, byte[]>> partExprs, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + + /** + * Generalization of dropPartitions(). + * @param catName catalog name + * @param dbName Name of the database + * @param tblName Name of the table + * @param partExprs Partition-specification + * @param options Boolean options for dropping partitions + * @return List of Partitions dropped + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException On failure + */ + List<Partition> dropPartitions(String catName, String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + /** + * Drop a partition. + * @param db_name database name. + * @param tbl_name table name. + * @param name partition name. + * @param deleteData whether to delete the data or just the metadata. + * @return true if the partition was dropped. + * @throws NoSuchObjectException no such partition. + * @throws MetaException error accessing the RDBMS or storage + * @throws TException thrift transport error + */ boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException;
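Dropping by partition name, rather than by values, in a minimal hypothetical form (names illustrative):

  import org.apache.hadoop.hive.metastore.IMetaStoreClient;

  class DropByNameExample {
    // Drops a partition addressed by its name string; true also deletes the data.
    static boolean drop(IMetaStoreClient client) throws Exception {
      return client.dropPartition("sales_db", "orders", "ds=2018-03-01", true);
    }
  }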
/** + * Drop a partition. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param name partition name. + * @param deleteData whether to delete the data or just the metadata. + * @return true if the partition was dropped. + * @throws NoSuchObjectException no such partition. + * @throws MetaException error accessing the RDBMS or storage + * @throws TException thrift transport error + */ + boolean dropPartition(String catName, String db_name, String tbl_name, + String name, boolean deleteData) + throws NoSuchObjectException, MetaException, TException; + + /** * updates a partition to new partition * * @param dbName @@ -890,6 +2013,27 @@ void alter_partition(String dbName, String tblName, Partition newPart) /** * updates a partition to new partition + * @param catName catalog name + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + default void alter_partition(String catName, String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + alter_partition(catName, dbName, tblName, newPart, null); + } + + /** + * updates a partition to new partition * * @param dbName * database of the old partition @@ -908,6 +2052,26 @@ void alter_partition(String dbName, String tblName, Partition newPart, Environme throws InvalidOperationException, MetaException, TException; /** + * updates a partition to new partition + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** + * updates a list of partitions * * @param dbName @@ -935,7 +2099,7 @@ void alter_partitions(String dbName, String tblName, List<Partition> newParts) * table name of the old partition * @param newParts * list of partitions - * @param environmentContext + * @param environmentContext key value pairs to pass to alter function. * @throws InvalidOperationException * if the old partition does not exist * @throws MetaException @@ -948,11 +2112,54 @@ void alter_partitions(String dbName, String tblName, List<Partition> newParts, throws InvalidOperationException, MetaException, TException; /** + * updates a list of partitions + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + default void alter_partitions(String catName, String dbName, String tblName, + List<Partition> newParts) + throws InvalidOperationException, MetaException, TException { + alter_partitions(catName, dbName, tblName, newParts, null); + }
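A hypothetical use of the catalog-aware alter_partitions default method above, which passes a null EnvironmentContext through; names are illustrative:

  import java.util.List;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Partition;

  class AlterPartitionsExample {
    // Tags every partition in the batch, then writes them back in one metastore call.
    static void tag(IMetaStoreClient client, List<Partition> parts) throws Exception {
      for (Partition p : parts) {
        p.putToParameters("compacted", "true");
      }
      client.alter_partitions("hive", "sales_db", "orders", parts);
    }
  }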
+ + /** + * updates a list of partitions + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @param environmentContext key value pairs to pass to alter function. + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partitions(String catName, String dbName, String tblName, List<Partition> newParts, + EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** * rename a partition to a new partition * * @param dbname * database of the old partition - * @param name + * @param tableName * table name of the old partition * @param part_vals * values of the old partition @@ -965,38 +2172,91 @@ void alter_partitions(String dbName, String tblName, List<Partition> newParts, * @throws TException * if error in communicating with metastore server */ - void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart) + void renamePartition(final String dbname, final String tableName, final List<String> part_vals, + final Partition newPart) throws InvalidOperationException, MetaException, TException; /** - * @param db + * rename a partition to a new partition + * @param catName catalog name. + * @param dbname + * database of the old partition * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException + * table name of the old partition + * @param part_vals + * values of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if srcFs and destFs are different * @throws MetaException + * if error in updating metadata * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, - * java.lang.String) + * if error in communicating with metastore server + */ + void renamePartition(String catName, String dbname, String tableName, List<String> part_vals, + Partition newPart) + throws InvalidOperationException, MetaException, TException; + + /** + * Get schema for a table, excluding the partition columns. + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List<FieldSchema> getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; /** - * @param db - * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, - * java.lang.String) + * Get schema for a table, excluding the partition columns. + * @param catName catalog name + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List<FieldSchema> getFields(String catName, String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException;
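To illustrate getFields, which returns only the non-partition columns, a minimal hypothetical caller:

  import java.util.List;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;

  class GetFieldsExample {
    // Prints the non-partition columns of a table in a named catalog.
    static void describe(IMetaStoreClient client) throws Exception {
      List<FieldSchema> cols = client.getFields("hive", "sales_db", "orders");
      for (FieldSchema fs : cols) {
        System.out.println(fs.getName() + "\t" + fs.getType());
      }
    }
  }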
+ + /** + * Get schema for a table, including the partition columns. + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List<FieldSchema> getSchema(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; /** + * Get schema for a table, including the partition columns. + * @param catName catalog name + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List<FieldSchema> getSchema(String catName, String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException; + + /** * @param name * name of the configuration property to get the value of * @param defaultValue @@ -1039,7 +2299,6 @@ String getConfigValue(String name, String defaultValue) * @throws TException * @throws InvalidInputException */ - boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; @@ -1054,60 +2313,146 @@ boolean updateTableColumnStatistics(ColumnStatistics statsObj) * @throws TException * @throws InvalidInputException */ - boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; /** - * Get table column statistics given dbName, tableName and multiple colName-s - * @return ColumnStatistics struct for a given db, table and columns + * Get the column statistics for a set of columns in a table. This should only be used for + * non-partitioned tables. For partitioned tables use + * {@link #getPartitionColumnStatistics(String, String, List, List)}. + * @param dbName database name + * @param tableName table name + * @param colNames list of column names + * @return list of column statistics objects, one per column + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, List<String> colNames) throws NoSuchObjectException, MetaException, TException; /** - * Get partitions column statistics given dbName, tableName, multiple partitions and colName-s - * @return ColumnStatistics struct for a given db, table and columns + * Get the column statistics for a set of columns in a table. This should only be used for + * non-partitioned tables. For partitioned tables use + * {@link #getPartitionColumnStatistics(String, String, String, List, List)}. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colNames list of column names + * @return list of column statistics objects, one per column + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, String tableName, + List<String> colNames) + throws NoSuchObjectException, MetaException, TException;
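And a matching hypothetical sketch for the table-level statistics call just declared (non-partitioned tables only); the column and table names are illustrative:

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

  class TableStatsExample {
    // Reads column statistics for two columns of a non-partitioned table.
    static void printStats(IMetaStoreClient client) throws Exception {
      List<ColumnStatisticsObj> stats = client.getTableColumnStatistics(
          "hive", "sales_db", "customers", Arrays.asList("id", "country"));
      for (ColumnStatisticsObj s : stats) {
        System.out.println(s.getColName() + " -> " + s.getStatsData());
      }
    }
  }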
+ + /** + * Get the column statistics for a set of columns in a partition. + * @param dbName database name + * @param tableName table name + * @param partNames partition names. Since these are names they should be of the form + * "key1=value1[/key2=value2...]" + * @param colNames list of column names + * @return map of partition names to the column statistics for those partitions + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName, String tableName, List<String> partNames, List<String> colNames) throws NoSuchObjectException, MetaException, TException; /** - * Delete partition level column statistics given dbName, tableName, partName and colName - * @param dbName - * @param tableName - * @param partName - * @param colName + * Get the column statistics for a set of columns in a partition. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partNames partition names. Since these are names they should be of the form + * "key1=value1[/key2=value2...]" + * @param colNames list of column names + * @return map of partition names to the column statistics for those partitions + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List<String> partNames, List<String> colNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Delete partition level column statistics given dbName, tableName, partName and colName, or + * all columns in a partition. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param colName column name, or null for all columns + * @return boolean indicating outcome of the operation - * @throws NoSuchObjectException - * @throws InvalidObjectException - * @throws MetaException - * @throws TException - * @throws InvalidInputException + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error dropping the stats data + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + * @throws InvalidInputException input is invalid or null. */ - boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** - * Delete table level column statistics given dbName, tableName and colName - * @param dbName - * @param tableName - * @param colName + * Delete partition level column statistics given dbName, tableName, partName and colName, or + * all columns in a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param colName column name, or null for all columns + * @return boolean indicating outcome of the operation + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error dropping the stats data + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + * @throws InvalidInputException input is invalid or null.
+ */ + boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; + + /** + * Delete table level column statistics given dbName, tableName and colName, or all columns in + * a table. This should be used for non-partitioned tables. + * @param dbName database name + * @param tableName table name + * @param colName column name, or null to drop stats for all columns * @return boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws TException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws TException thrift transport error + * @throws InvalidInputException bad input, like a null table name. */ boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** + * Delete table level column statistics given dbName, tableName and colName, or all columns in + * a table. This should be used for non-partitioned tables. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colName column name, or null to drop stats for all columns + * @return boolean indicating the outcome of the operation + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws TException thrift transport error + * @throws InvalidInputException bad input, like a null table name. + */ + boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; + + /** * @param role * role object * @return true on success @@ -1267,23 +2612,117 @@ void updateMasterKey(Integer seqNo, String key) String[] getMasterKeys() throws TException; + /** + * Create a new function. + * @param func function specification + * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ void createFunction(Function func) throws InvalidObjectException, MetaException, TException; + /** + * Alter a function. + * @param dbName database name. + * @param funcName function name. + * @param newFunction new function specification. This should be complete, not just the changes. + * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException, TException; + /** + * Alter a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @param newFunction new function specification. This should be complete, not just the changes. 
+ * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + void alterFunction(String catName, String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException; + + /** + * Drop a function. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException declared, but it is unclear when this would be thrown + * @throws InvalidInputException declared, but it is unclear when this would be thrown + * @throws TException thrift transport error + */ void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + /** + * Drop a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException declared, but it is unclear when this would be thrown + * @throws InvalidInputException declared, but it is unclear when this would be thrown + * @throws TException thrift transport error + */ + void dropFunction(String catName, String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + + /** + * Get a function. + * @param dbName database name. + * @param funcName function name. + * @return the function + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ Function getFunction(String dbName, String funcName) throws MetaException, TException; + /** + * Get a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @return the function + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Function getFunction(String catName, String dbName, String funcName) + throws MetaException, TException; + + /** + * Get all functions matching a pattern. + * @param dbName database name. + * @param pattern pattern to match. This is a Java regular expression pattern. + * @return list of matching function names + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ List<String> getFunctions(String dbName, String pattern) throws MetaException, TException; - GetAllFunctionsResponse getAllFunctions() - throws MetaException, TException; + /** + * Get all functions matching a pattern. + * @param catName catalog name. + * @param dbName database name. + * @param pattern pattern to match. This is a Java regular expression pattern. + * @return list of matching function names + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List<String> getFunctions(String catName, String dbName, String pattern) + throws MetaException, TException; + + /** + * Get all functions in the default catalog. + * @return list of functions + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + GetAllFunctionsResponse getAllFunctions() throws MetaException, TException; /** * Get a structure that details valid transactions. @@ -1702,10 +3141,48 @@ GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest ge GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException; - public AggrStats getAggrColStatsFor(String dbName, String tblName, + /** + * Get aggregated column stats for a set of partitions.
+ * @param dbName database name + * @param tblName table name + * @param colNames list of column names + * @param partName list of partition names (not values). + * @return aggregated stats for requested partitions + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport exception + */ + AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames, List<String> partName) throws NoSuchObjectException, MetaException, TException; - boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; + /** + * Get aggregated column stats for a set of partitions. + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colNames list of column names + * @param partNames list of partition names (not values). + * @return aggregated stats for requested partitions + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport exception + */ + AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List<String> colNames, List<String> partNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Set table or partition column statistics. + * @param request request object, contains all the table, partition, and statistics information + * @return true if the set was successful. + * @throws NoSuchObjectException the table, partition, or columns specified do not exist. + * @throws InvalidObjectException the stats object is not valid. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws InvalidInputException the input is invalid (e.g., a null table name) + */ + boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; /** * Flush any catalog objects held by the metastore implementation. Note that this does not @@ -1737,15 +3214,47 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, boolean cacheFileMetadata(String dbName, String tableName, String partName, boolean allParts) throws TException; + /** + * Get a primary key for a table. + * @param request Request info + * @return List of primary key columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no primary key exists on this table, or maybe no such table + * @throws TException thrift transport error + */ List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a foreign key for a table. + * @param request Request info + * @return List of foreign key columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no foreign key exists on this table, or maybe no such table + * @throws TException thrift transport error + */ List<SQLForeignKey> getForeignKeys(ForeignKeysRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a unique constraint for a table.
+ * @param request Request info + * @return List of unique constraint columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no unique constraint on this table, or maybe no such table + * @throws TException thrift transport error + */ List getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a not null constraint for a table. + * @param request Request info + * @return List of not null constraint columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no not null constraint on this table, or maybe no such table + * @throws TException thrift transport error + */ List getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException, NoSuchObjectException, TException; @@ -1764,18 +3273,72 @@ void createTableWithConstraints( List checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; - void dropConstraint(String dbName, String tableName, String constraintName) throws - MetaException, NoSuchObjectException, TException; + /** + * Drop a constraint. This can be used for primary keys, foreign keys, unique constraints, or + * not null constraints. + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws MetaException RDBMS access error + * @throws NoSuchObjectException no such constraint exists + * @throws TException thrift transport error + */ + void dropConstraint(String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException; + + /** + * Drop a constraint. This can be used for primary keys, foreign keys, unique constraints, or + * not null constraints. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws MetaException RDBMS access error + * @throws NoSuchObjectException no such constraint exists + * @throws TException thrift transport error + */ + void dropConstraint(String catName, String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException; + + /** + * Add a primary key. + * @param primaryKeyCols Primary key columns. + * @throws MetaException error reading or writing to the RDBMS or a primary key already exists + * @throws NoSuchObjectException no such table exists + * @throws TException thrift transport error + */ void addPrimaryKey(List primaryKeyCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a foreign key + * @param foreignKeyCols Foreign key definition + * @throws MetaException error reading or writing to the RDBMS or foreign key already exists + * @throws NoSuchObjectException one of the tables in the foreign key does not exist. 
+ * @throws TException thrift transport error + */ void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a unique constraint + * @param uniqueConstraintCols Unique constraint definition + * @throws MetaException error reading or writing to the RDBMS or unique constraint already exists + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error + */ void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a not null constraint + * @param notNullConstraintCols Not null constraint definition + * @throws MetaException error reading or writing to the RDBMS or not null constraint already + * exists + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error + */ void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws MetaException, NoSuchObjectException, TException; @@ -1830,16 +3393,16 @@ void createWMPool(WMPool pool) throws NoSuchObjectException, InvalidObjectException, MetaException, TException; void alterWMPool(WMNullablePool pool, String poolPath) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + throws NoSuchObjectException, InvalidObjectException, TException; void dropWMPool(String resourcePlanName, String poolPath) - throws NoSuchObjectException, MetaException, TException; + throws TException; void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + throws TException; void dropWMMapping(WMMapping mapping) - throws NoSuchObjectException, MetaException, TException; + throws TException; void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, @@ -1858,6 +3421,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** * Alter an existing schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param newSchema altered schema object @@ -1865,10 +3429,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void alterISchema(String dbName, String schemaName, ISchema newSchema) throws TException; + void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException; /** * Fetch a schema. + * @param catName catalog name * @param dbName database the schema is in * @param name name of the schema * @return the schema or null if no such schema @@ -1876,10 +3441,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - ISchema getISchema(String dbName, String name) throws TException; + ISchema getISchema(String catName, String dbName, String name) throws TException; /** * Drop an existing schema. If there are schema versions of this, this call will fail.
+ * @param catName catalog name * @param dbName database the schema is in * @param name name of the schema to drop * @throws NoSuchObjectException no schema with this name could be found @@ -1887,7 +3453,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropISchema(String dbName, String name) throws TException; + void dropISchema(String catName, String dbName, String name) throws TException; /** * Add a new version to an existing schema. @@ -1909,10 +3475,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaVersion(String dbName, String schemaName, int version) throws TException; + SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; /** * Get the latest version of a schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @return latest version of the schema or null if the schema does not exist or there are no @@ -1921,10 +3488,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaLatestVersion(String dbName, String schemaName) throws TException; + SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException; /** * Get all the extant versions of a schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema. * @return list of all the schema versions or null if this schema does not exist or has no @@ -1933,12 +3501,13 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - List getSchemaAllVersions(String dbName, String schemaName) throws TException; + List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException; /** * Drop a version of a schema. Given that versions are supposed to be immutable you should * think really hard before you call this method. It should only be used for schema versions * that were added in error and never referenced any data. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1946,7 +3515,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropSchemaVersion(String dbName, String schemaName, int version) throws TException; + void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; /** * Find all schema versions that have columns that match a query. @@ -1961,6 +3530,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** * Map a schema version to a serde. This mapping is one-to-one, thus this will destroy any * previous mappings for this schema version. 
+ * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1970,10 +3540,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void mapSchemaVersionToSerde(String dbName, String schemaName, int version, String serdeName) throws TException; + void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException; /** * Set the state of a schema version. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1983,7 +3554,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void setSchemaVersionState(String dbName, String schemaName, int version, SchemaVersionState state) throws TException; + void setSchemaVersionState(String catName, String dbName, String schemaName, int version, SchemaVersionState state) throws TException; /** * Add a serde. This is primarily intended for use with SchemaRegistry objects, since serdes diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java index 1636d48d2c..80cb1de75e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java @@ -130,10 +130,13 @@ public synchronized void init(Configuration conf, IHMSHandler handler) { public void run() { try { RawStore store = handler.getMS(); - for (String dbName : store.getAllDatabases()) { - for (Table mv : store.getTableObjectsByName(dbName, store.getTables(dbName, null, TableType.MATERIALIZED_VIEW))) { - addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), - mv.getCreationMetadata().getValidTxnList(), OpType.LOAD); + for (String catName : store.getCatalogs()) { + for (String dbName : store.getAllDatabases(catName)) { + for (Table mv : store.getTableObjectsByName(catName, dbName, + store.getTables(catName, dbName, null, TableType.MATERIALIZED_VIEW))) { + addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), + mv.getCreationMetadata().getValidTxnList(), OpType.LOAD); + } } } LOG.info("Initialized materializations invalidation cache"); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 0dcf1170a7..751441a737 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.commons.lang.StringUtils.normalizeSpace; import static org.apache.commons.lang.StringUtils.repeat; +import static 
org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import java.sql.Blob; import java.sql.Clob; @@ -311,17 +313,19 @@ private void executeNoResult(final String queryText) throws SQLException { } } - public Database getDatabase(String dbName) throws MetaException{ + public Database getDatabase(String catName, String dbName) throws MetaException{ Query queryDbSelector = null; Query queryDbParams = null; try { dbName = dbName.toLowerCase(); + catName = catName.toLowerCase(); String queryTextDbSelector= "select " + "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", " - + "\"OWNER_NAME\", \"OWNER_TYPE\" " - + "FROM "+ DBS +" where \"NAME\" = ? "; - Object[] params = new Object[] { dbName }; + + "\"OWNER_NAME\", \"OWNER_TYPE\", \"CTLG_NAME\" " + + "FROM "+ DBS + + " where \"NAME\" = ? and \"CTLG_NAME\" = ? "; + Object[] params = new Object[] { dbName, catName }; queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector); if (LOG.isTraceEnabled()) { @@ -370,6 +374,7 @@ public Database getDatabase(String dbName) throws MetaException{ String type = extractSqlString(dbline[5]); db.setOwnerType( (null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type)); + db.setCatalogName(extractSqlString(dbline[6])); db.setParameters(MetaStoreUtils.trimMapNulls(dbParams,convertMapNullsToEmptyStrings)); if (LOG.isDebugEnabled()){ LOG.debug("getDatabase: directsql returning db " + db.getName() @@ -389,20 +394,22 @@ public Database getDatabase(String dbName) throws MetaException{ /** * Get table names by using direct SQL queries. - * + * @param catName catalog name * @param dbName Metastore database name * @param tableType Table type, or null if we want to get all tables * @return list of table names */ - public List<String> getTables(String dbName, TableType tableType) throws MetaException { + public List<String> getTables(String catName, String dbName, TableType tableType) + throws MetaException { String queryText = "SELECT " + TBLS + ".\"TBL_NAME\"" + " FROM " + TBLS + " " + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " - + " WHERE " + DBS + ".\"NAME\" = ? " + + " WHERE " + DBS + ".\"NAME\" = ? AND " + DBS + ".\"CTLG_NAME\" = ? " + (tableType == null ? "" : "AND " + TBLS + ".\"TBL_TYPE\" = ? ") ; - List<String> pms = new ArrayList<String>(); + List<String> pms = new ArrayList<>(); pms.add(dbName); + pms.add(catName); if (tableType != null) { pms.add(tableType.toString()); } @@ -436,13 +443,15 @@ public Database getDatabase(String dbName) throws MetaException{ /** * Gets partitions by using direct SQL queries. * Note that batching is not needed for this method; the list of names implies the batch size. + * @param catName Metastore catalog name. * @param dbName Metastore db name. * @param tblName Metastore table name. * @param partNames Partition names to get. * @return List of partitions.
*/ - public List getPartitionsViaSqlFilter(final String dbName, final String tblName, - List partNames) throws MetaException { + public List getPartitionsViaSqlFilter(final String catName, final String dbName, + final String tblName, List partNames) + throws MetaException { if (partNames.isEmpty()) { return Collections.emptyList(); } @@ -450,7 +459,7 @@ public Database getDatabase(String dbName) throws MetaException{ @Override public List run(List input) throws MetaException { String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")"; - return getPartitionsViaSqlFilterInternal(dbName, tblName, null, filter, input, + return getPartitionsViaSqlFilterInternal(catName, dbName, tblName, null, filter, input, Collections.emptyList(), null); } }); @@ -465,13 +474,15 @@ public Database getDatabase(String dbName) throws MetaException{ public List getPartitionsViaSqlFilter( SqlFilterForPushdown filter, Integer max) throws MetaException { Boolean isViewTable = isViewTable(filter.table); - return getPartitionsViaSqlFilterInternal(filter.table.getDbName(), filter.table.getTableName(), - isViewTable, filter.filter, filter.params, filter.joins, max); + String catName = filter.table.isSetCatName() ? filter.table.getCatName() : + DEFAULT_CATALOG_NAME; + return getPartitionsViaSqlFilterInternal(catName, filter.table.getDbName(), + filter.table.getTableName(), isViewTable, filter.filter, filter.params, filter.joins, max); } public static class SqlFilterForPushdown { - private final List params = new ArrayList(); - private final List joins = new ArrayList(); + private final List params = new ArrayList<>(); + private final List joins = new ArrayList<>(); private String filter; private Table table; } @@ -488,14 +499,15 @@ public boolean generateSqlFilterForPushdown( /** * Gets all partitions of a table by using direct SQL queries. + * @param catName Metastore catalog name. * @param dbName Metastore db name. * @param tblName Metastore table name. * @param max The maximum number of partitions to return. * @return List of partitions. */ - public List getPartitions( + public List getPartitions(String catName, String dbName, String tblName, Integer max) throws MetaException { - return getPartitionsViaSqlFilterInternal(dbName, tblName, null, + return getPartitionsViaSqlFilterInternal(catName, dbName, tblName, null, null, Collections.emptyList(), Collections.emptyList(), max); } @@ -504,13 +516,13 @@ private static Boolean isViewTable(Table t) { t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null; } - private boolean isViewTable(String dbName, String tblName) throws MetaException { + private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException { Query query = null; try { String queryText = "select \"TBL_TYPE\" from " + TBLS + "" + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + - " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ?"; - Object[] params = new Object[] { tblName, dbName }; + " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?"; + Object[] params = new Object[] { tblName, dbName, catName }; query = pm.newQuery("javax.jdo.query.SQL", queryText); query.setUnique(true); Object result = executeWithArray(query, params, queryText); @@ -536,11 +548,13 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException * @param max The maximum number of partitions to return. * @return List of partition objects. 
*/ - private List getPartitionsViaSqlFilterInternal(String dbName, String tblName, - final Boolean isView, String sqlFilter, List paramsForFilter, - List joinsForFilter, Integer max) throws MetaException { + private List getPartitionsViaSqlFilterInternal( + String catName, String dbName, String tblName, final Boolean isView, String sqlFilter, + List paramsForFilter, List joinsForFilter,Integer max) + throws MetaException { boolean doTrace = LOG.isDebugEnabled(); final String dbNameLcase = dbName.toLowerCase(), tblNameLcase = tblName.toLowerCase(); + final String catNameLcase = normalizeSpace(catName); // We have to be mindful of order during filtering if we are not returning all partitions. String orderForFilter = (max != null) ? " order by \"PART_NAME\" asc" : ""; @@ -559,12 +573,14 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " and " + DBS + ".\"NAME\" = ? " + join(joinsForFilter, ' ') - + (StringUtils.isBlank(sqlFilter) ? "" : (" where " + sqlFilter)) + orderForFilter; - Object[] params = new Object[paramsForFilter.size() + 2]; + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter; + Object[] params = new Object[paramsForFilter.size() + 3]; params[0] = tblNameLcase; params[1] = dbNameLcase; + params[2] = catNameLcase; for (int i = 0; i < paramsForFilter.size(); ++i) { - params[i + 2] = paramsForFilter.get(i); + params[i + 3] = paramsForFilter.get(i); } long start = doTrace ? System.nanoTime() : 0; @@ -583,7 +599,8 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException List result = runBatched(sqlResult, new Batchable() { @Override public List run(List input) throws MetaException { - return getPartitionsFromPartitionIds(dbNameLcase, tblNameLcase, isView, input); + return getPartitionsFromPartitionIds(catNameLcase, dbNameLcase, tblNameLcase, isView, + input); } }); @@ -592,7 +609,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException } /** Should be called with the list short enough to not trip up Oracle/etc. */ - private List getPartitionsFromPartitionIds(String dbName, String tblName, + private List getPartitionsFromPartitionIds(String catName, String dbName, String tblName, Boolean isView, List partIdList) throws MetaException { boolean doTrace = LOG.isDebugEnabled(); int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma @@ -635,6 +652,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException StringBuilder colsSb = new StringBuilder(7); // We expect that there's only one field schema. tblName = tblName.toLowerCase(); dbName = dbName.toLowerCase(); + catName = catName.toLowerCase(); for (Object[] fields : sqlResult) { // Here comes the ugly part... long partitionId = extractSqlLong(fields[0]); @@ -644,7 +662,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException // A partition must have at least sdId and serdeId set, or nothing set if it's a view. 
if (sdId == null || serdeId == null) { if (isView == null) { - isView = isViewTable(dbName, tblName); + isView = isViewTable(catName, dbName, tblName); } if ((sdId != null || colId != null || serdeId != null) || !isView) { throw new MetaException("Unexpected null for one of the IDs, SD " + sdId + @@ -655,8 +673,9 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException Partition part = new Partition(); orderedResult.add(part); // Set the collection fields; some code might not check presence before accessing them. - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); part.setValues(new ArrayList()); + part.setCatName(catName); part.setDbName(dbName); part.setTableName(tblName); if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4])); @@ -910,6 +929,7 @@ public void apply(SerDeInfo t, Object[] fields) { public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException { boolean doTrace = LOG.isDebugEnabled(); + String catName = filter.table.getCatName().toLowerCase(); String dbName = filter.table.getDbName().toLowerCase(); String tblName = filter.table.getTableName().toLowerCase(); @@ -920,13 +940,15 @@ public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws Meta + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " and " + DBS + ".\"NAME\" = ? " + join(filter.joins, ' ') - + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" where " + filter.filter)); + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" and " + filter.filter)); - Object[] params = new Object[filter.params.size() + 2]; + Object[] params = new Object[filter.params.size() + 3]; params[0] = tblName; params[1] = dbName; + params[2] = catName; for (int i = 0; i < filter.params.size(); ++i) { - params[i + 2] = filter.params.get(i); + params[i + 3] = filter.params.get(i); } long start = doTrace ? System.nanoTime() : 0; @@ -1291,10 +1313,12 @@ public void visit(LeafNode node) throws MetaException { if (dbHasJoinCastBug) { // This is a workaround for DERBY-6358 and Oracle bug; it is pretty horrible. tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + + DBS + ".\"CTLG_NAME\" = ? and " + "\"FILTER" + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and " + "\"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex); params.add(table.getTableName().toLowerCase()); params.add(table.getDbName().toLowerCase()); + params.add(table.getCatName().toLowerCase()); } tableValue += " then " + tableValue0 + " else null end)"; } @@ -1311,29 +1335,32 @@ public void visit(LeafNode node) throws MetaException { /** * Retrieve the column statistics for the specified columns of the table. NULL * is returned if the columns are not provided. 
+ * @param catName the catalog name of the table * @param dbName the database name of the table * @param tableName the table name * @param colNames the list of the column names * @return the column statistics for the specified columns * @throws MetaException */ - public ColumnStatistics getTableStats(final String dbName, final String tableName, - List colNames, boolean enableBitVector) throws MetaException { + public ColumnStatistics getTableStats(final String catName, final String dbName, + final String tableName, List colNames, + boolean enableBitVector) throws MetaException { if (colNames == null || colNames.isEmpty()) { return null; } final boolean doTrace = LOG.isDebugEnabled(); - final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS + " " - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("; + final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("; Batchable b = new Batchable() { @Override public List run(List input) throws MetaException { String queryText = queryText0 + makeParams(input.size()) + ")"; - Object[] params = new Object[input.size() + 2]; - params[0] = dbName; - params[1] = tableName; + Object[] params = new Object[input.size() + 3]; + params[0] = catName; + params[1] = dbName; + params[2] = tableName; for (int i = 0; i < input.size(); ++i) { - params[i + 2] = input.get(i); + params[i + 3] = input.get(i); } long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); @@ -1357,7 +1384,7 @@ public ColumnStatistics getTableStats(final String dbName, final String tableNam return result; } - public AggrStats aggrColStatsForPartitions(String dbName, String tableName, + public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName, List partNames, List colNames, boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { @@ -1379,33 +1406,33 @@ public AggrStats aggrColStatsForPartitions(String dbName, String tableName, boolean computePartsFound = true; for (String colName : colNames) { // Check the cache first - colStatsAggrCached = aggrStatsCache.get(dbName, tableName, colName, partNames); + colStatsAggrCached = aggrStatsCache.get(catName, dbName, tableName, colName, partNames); if (colStatsAggrCached != null) { colStatsList.add(colStatsAggrCached.getColStats()); partsFound = colStatsAggrCached.getNumPartsCached(); } else { if (computePartsFound) { - partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); computePartsFound = false; } - List colNamesForDB = new ArrayList(); + List colNamesForDB = new ArrayList<>(); colNamesForDB.add(colName); // Read aggregated stats for one column colStatsAggrFromDB = - columnStatisticsObjForPartitions(dbName, tableName, partNames, colNamesForDB, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); if (!colStatsAggrFromDB.isEmpty()) { ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0); colStatsList.add(colStatsAggr); // Update the cache to add this new aggregate node - aggrStatsCache.add(dbName, tableName, colName, partsFound, colStatsAggr, 
bloomFilter); + aggrStatsCache.add(catName, dbName, tableName, colName, partsFound, colStatsAggr, bloomFilter); } } } } else { - partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); colStatsList = - columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation @@ -1423,12 +1450,13 @@ private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, double fpp, return bloomFilter; } - private long partsFoundForPartitions(final String dbName, final String tableName, + private long partsFoundForPartitions( + final String catName, final String dbName, final String tableName, final List partNames, List colNames) throws MetaException { assert !colNames.isEmpty() && !partNames.isEmpty(); final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select count(\"COLUMN_NAME\") from " + PART_COL_STATS + "" - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)" + " group by \"PARTITION_NAME\""; List allCounts = runBatched(colNames, new Batchable() { @@ -1444,7 +1472,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName Query query = pm.newQuery("javax.jdo.query.SQL", queryText); try { Object qResult = executeWithArray(query, prepareParams( - dbName, tableName, inputPartNames, inputColName), queryText); + catName, dbName, tableName, inputPartNames, inputColName), queryText); long end = doTrace ? 
System.nanoTime() : 0; timingTrace(doTrace, queryText, start, end); ForwardQueryResult fqr = (ForwardQueryResult) qResult; @@ -1469,7 +1497,8 @@ private long partsFoundForPartitions(final String dbName, final String tableName return partsFound; } - private List columnStatisticsObjForPartitions(final String dbName, + private List columnStatisticsObjForPartitions( + final String catName, final String dbName, final String tableName, final List partNames, List colNames, long partsFound, final boolean useDensityFunctionForNDVEstimation, final double ndvTuner, final boolean enableBitVector) throws MetaException { final boolean areAllPartsFound = (partsFound == partNames.size()); @@ -1479,7 +1508,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName return runBatched(partNames, new Batchable() { @Override public List run(List inputPartNames) throws MetaException { - return columnStatisticsObjForPartitionsBatch(dbName, tableName, inputPartNames, + return columnStatisticsObjForPartitionsBatch(catName, dbName, tableName, inputPartNames, inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } }); @@ -1487,10 +1516,10 @@ private long partsFoundForPartitions(final String dbName, final String tableName }); } - public List getColStatsForAllTablePartitions(String dbName, + public List getColStatsForAllTablePartitions(String catName, String dbName, boolean enableBitVector) throws MetaException { String queryText = "select \"TABLE_NAME\", \"PARTITION_NAME\", " + getStatsList(enableBitVector) - + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ?"; + + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"CAT_NAME\" = ?"; long start = 0; long end = 0; Query query = null; @@ -1500,7 +1529,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName List colStatsForDB = new ArrayList(); try { query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, new Object[] { dbName }, queryText); + qResult = executeWithArray(query, new Object[] { dbName, catName }, queryText); if (qResult == null) { query.closeAll(); return colStatsForDB; @@ -1512,7 +1541,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName String tblName = (String) row[0]; String partName = (String) row[1]; ColumnStatisticsObj colStatObj = prepareCSObj(row, 2); - colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, dbName, tblName, partName)); + colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, catName, dbName, tblName, partName)); Deadline.checkTimeout(); } } finally { @@ -1522,31 +1551,31 @@ private long partsFoundForPartitions(final String dbName, final String tableName } /** Should be called with the list short enough to not trip up Oracle/etc. 
*/ - private List columnStatisticsObjForPartitionsBatch(String dbName, + private List columnStatisticsObjForPartitionsBatch(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) throws MetaException { if (enableBitVector) { - return aggrStatsUseJava(dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } else { - return aggrStatsUseDB(dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } } - private List aggrStatsUseJava(String dbName, String tableName, + private List aggrStatsUseJava(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // 1. get all the stats for colNames in partNames; List partStats = - getPartitionStats(dbName, tableName, partNames, colNames, true); + getPartitionStats(catName, dbName, tableName, partNames, colNames, true); // 2. use util function to aggr stats - return MetaStoreUtils.aggrPartitionStats(partStats, dbName, tableName, partNames, colNames, + return MetaStoreUtils.aggrPartitionStats(partStats, catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } - private List aggrStatsUseDB(String dbName, + private List aggrStatsUseDB(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // TODO: all the extrapolation logic should be moved out of this class, @@ -1573,7 +1602,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\")," + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")," + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "; + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "; String queryText = null; long start = 0; long end = 0; @@ -1589,7 +1618,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), queryText); if (qResult == null) { query.closeAll(); @@ -1612,13 +1641,13 @@ private long partsFoundForPartitions(final String dbName, final String tableName List colStats = new ArrayList(colNames.size()); queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") " + " from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? 
" + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), queryText); end = doTrace ? System.nanoTime() : 0; timingTrace(doTrace, queryText, start, end); @@ -1653,7 +1682,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, noExtraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1677,7 +1706,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName // get sum for all columns to reduce the number of queries Map> sumMap = new HashMap>(); queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")" - + " from " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size()) + ") and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ") group by \"COLUMN_NAME\""; @@ -1686,7 +1715,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName List extraColumnNames = new ArrayList(); extraColumnNames.addAll(extraColumnNameTypeParts.keySet()); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, extraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, extraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1750,20 +1779,20 @@ private long partsFoundForPartitions(final String dbName, final String tableName if (!decimal) { queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " order by \"" + colStatName + "\""; } else { queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " order by cast(\"" + colStatName + "\" as decimal)"; } start = doTrace ? 
System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1786,13 +1815,13 @@ private long partsFoundForPartitions(final String dbName, final String tableName + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal))," + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\")," + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")" - + " from " + PART_COL_STATS + "" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1837,11 +1866,12 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, return cso; } - private Object[] prepareParams(String dbName, String tableName, List partNames, - List colNames) throws MetaException { + private Object[] prepareParams(String catName, String dbName, String tableName, + List partNames, List colNames) throws MetaException { - Object[] params = new Object[colNames.size() + partNames.size() + 2]; + Object[] params = new Object[colNames.size() + partNames.size() + 3]; int paramI = 0; + params[paramI++] = catName; params[paramI++] = dbName; params[paramI++] = tableName; for (String colName : colNames) { @@ -1854,14 +1884,16 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, return params; } - public List getPartitionStats(final String dbName, final String tableName, - final List partNames, List colNames, boolean enableBitVector) throws MetaException { + public List getPartitionStats( + final String catName, final String dbName, final String tableName, final List partNames, + List colNames, boolean enableBitVector) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { return Collections.emptyList(); } final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select \"PARTITION_NAME\", " + getStatsList(enableBitVector) + " from " - + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\"" + + " " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and " + + "\"COLUMN_NAME\"" + " in (%1$s) AND \"PARTITION_NAME\" in (%2$s) order by \"PARTITION_NAME\""; Batchable b = new Batchable() { @Override @@ -1874,7 +1906,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, long start = doTrace ? 
System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); Object qResult = executeWithArray(query, prepareParams( - dbName, tableName, inputPartNames, inputColNames), queryText); + catName, dbName, tableName, inputPartNames, inputColNames), queryText); timingTrace(doTrace, queryText0, start, (doTrace ? System.nanoTime() : 0)); if (qResult == null) { query.closeAll(); @@ -1904,6 +1936,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, continue; } else if (from != i) { ColumnStatisticsDesc csd = new ColumnStatisticsDesc(false, dbName, tableName); + csd.setCatName(catName); csd.setPartName(lastPartName); result.add(makeColumnStats(list.subList(from, i), csd, 1)); } @@ -2036,8 +2069,10 @@ public void closeAllQueries() { return result; } - public List getForeignKeys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { - List ret = new ArrayList(); + public List getForeignKeys(String catName, String parent_db_name, + String parent_tbl_name, String foreign_db_name, + String foreign_tbl_name) throws MetaException { + List ret = new ArrayList<>(); String queryText = "SELECT \"D2\".\"NAME\", \"T2\".\"TBL_NAME\", " + "CASE WHEN \"C2\".\"COLUMN_NAME\" IS NOT NULL THEN \"C2\".\"COLUMN_NAME\" " @@ -2065,6 +2100,7 @@ public void closeAllQueries() { + " \"P2\".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = " + MConstraint.FOREIGN_KEY_CONSTRAINT + " AND \"KEY_CONSTRAINTS2\".\"CONSTRAINT_TYPE\" = " + MConstraint.PRIMARY_KEY_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (foreign_db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (foreign_tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? AND") + (parent_tbl_name == null ? "" : " \"T2\".\"TBL_NAME\" = ? 
AND") @@ -2075,6 +2111,7 @@ public void closeAllQueries() { queryText = queryText.substring(0, queryText.length()-3); } List pms = new ArrayList(); + pms.add(catName); if (foreign_db_name != null) { pms.add(foreign_db_name); } @@ -2114,19 +2151,22 @@ public void closeAllQueries() { validate, rely ); - ret.add(currKey); + currKey.setCatName(catName); + ret.add(currKey); } } return ret; } - public List getPrimaryKeys(String db_name, String tbl_name) throws MetaException { - List ret = new ArrayList(); + public List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException { + List ret = new ArrayList<>(); String queryText = "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\", " + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" " + "ELSE " + PARTITION_KEYS + ".\"PKEY_NAME\" END, " + KEY_CONSTRAINTS + ".\"POSITION\", " - + "" + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", " + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\" " + + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", " + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\", " + + DBS + ".\"CTLG_NAME\"" + " from " + TBLS + " " + " INNER JOIN " + KEY_CONSTRAINTS + " ON " + TBLS + ".\"TBL_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_TBL_ID\" " + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " @@ -2135,6 +2175,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.PRIMARY_KEY_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ") ; @@ -2142,7 +2183,8 @@ public void closeAllQueries() { if (queryText.endsWith("AND")) { queryText = queryText.substring(0, queryText.length()-3); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2168,13 +2210,14 @@ public void closeAllQueries() { enable, validate, rely); - ret.add(currKey); + currKey.setCatName(extractSqlString(line[6])); + ret.add(currKey); } } return ret; } - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { List ret = new ArrayList(); String queryText = @@ -2190,6 +2233,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.UNIQUE_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? 
") ; @@ -2198,6 +2242,7 @@ public void closeAllQueries() { queryText = queryText.substring(0, queryText.length()-3); } List pms = new ArrayList(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2215,23 +2260,23 @@ public void closeAllQueries() { boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - SQLUniqueConstraint currConstraint = new SQLUniqueConstraint( - extractSqlString(line[0]), - extractSqlString(line[1]), - extractSqlString(line[2]), - extractSqlInt(line[3]), extractSqlString(line[4]), - enable, - validate, - rely); - ret.add(currConstraint); + ret.add(new SQLUniqueConstraint( + catName, + extractSqlString(line[0]), + extractSqlString(line[1]), + extractSqlString(line[2]), + extractSqlInt(line[3]), extractSqlString(line[4]), + enable, + validate, + rely)); } } return ret; } - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { - List ret = new ArrayList(); + List ret = new ArrayList<>(); String queryText = "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\"," + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" " @@ -2245,6 +2290,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.NOT_NULL_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ") ; @@ -2252,7 +2298,8 @@ public void closeAllQueries() { if (queryText.endsWith("AND")) { queryText = queryText.substring(0, queryText.length()-3); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2270,21 +2317,21 @@ public void closeAllQueries() { boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - SQLNotNullConstraint currConstraint = new SQLNotNullConstraint( - extractSqlString(line[0]), - extractSqlString(line[1]), - extractSqlString(line[2]), - extractSqlString(line[3]), - enable, - validate, - rely); - ret.add(currConstraint); + ret.add(new SQLNotNullConstraint( + catName, + extractSqlString(line[0]), + extractSqlString(line[1]), + extractSqlString(line[2]), + extractSqlString(line[3]), + enable, + validate, + rely)); } } return ret; } - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { List ret = new ArrayList(); String queryText = @@ -2301,6 +2348,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.DEFAULT_CONSTRAINT+ " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? 
") ; @@ -2311,7 +2359,8 @@ public void closeAllQueries() { if (LOG.isDebugEnabled()){ LOG.debug("getDefaultConstraints: directsql : " + queryText); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2330,6 +2379,7 @@ public void closeAllQueries() { boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; SQLDefaultConstraint currConstraint = new SQLDefaultConstraint( + catName, extractSqlString(line[0]), extractSqlString(line[1]), extractSqlString(line[2]), @@ -2344,7 +2394,7 @@ public void closeAllQueries() { return ret; } - public List getCheckConstraints(String db_name, String tbl_name) + public List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException { List ret = new ArrayList(); String queryText = @@ -2361,6 +2411,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.CHECK_CONSTRAINT+ " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ") ; @@ -2371,7 +2422,8 @@ public void closeAllQueries() { if (LOG.isDebugEnabled()){ LOG.debug("getCheckConstraints: directsql : " + queryText); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2390,6 +2442,7 @@ public void closeAllQueries() { boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; SQLCheckConstraint currConstraint = new SQLCheckConstraint( + catName, extractSqlString(line[0]), extractSqlString(line[1]), extractSqlString(line[2]), diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java index 67600e1e75..569fff0ad5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java @@ -35,10 +35,12 @@ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; @@ -222,6 +224,12 @@ public void onDropSchemaVersion(DropSchemaVersionEvent dropSchemaVersionEvent) throws MetaException { } + public void onCreateCatalog(CreateCatalogEvent createCatalogEvent) throws MetaException { + } + + public void onDropCatalog(DropCatalogEvent dropCatalogEvent) throws MetaException { + } + @Override public 
Configuration getConf() { return this.conf; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java index 8522afee84..f7a0cd073c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java @@ -22,12 +22,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; /** * Metadata filter hook for metastore client. This will be useful for authorization @@ -39,11 +41,31 @@ public interface MetaStoreFilterHook { /** + * Filter a catalog object. Default implementation returns the passed in catalog. + * @param catalog catalog to filter + * @return filtered catalog + * @throws MetaException something bad happened + */ + default Catalog filterCatalog(Catalog catalog) throws MetaException { + return catalog; + } + + /** + * Filter a list of catalog names. Default implementation returns the passed in list. + * @param catalogs list of catalog names. + * @return filtered list of catalog names. + * @throws MetaException something bad happened. + */ + default List filterCatalogs(List catalogs) throws MetaException { + return catalogs; + } + + /** * Filter given list of databases * @param dbList * @return List of filtered Db names */ - public List filterDatabases(List dbList) throws MetaException; + List filterDatabases(List dbList) throws MetaException; /** * filter to given database object if applicable @@ -51,15 +73,27 @@ * @return the same database if it's not filtered out * @throws NoSuchObjectException */ - public Database filterDatabase(Database dataBase) throws MetaException, NoSuchObjectException; + Database filterDatabase(Database dataBase) throws MetaException, NoSuchObjectException; /** * Filter given list of tables - * @param dbName - * @param tableList + * @param catName catalog name + * @param dbName database name + * @param tableList list of table returned by the metastore * @return List of filtered table names */ - public List filterTableNames(String dbName, List tableList) throws MetaException; + List filterTableNames(String catName, String dbName, List tableList) + throws MetaException; + + // Previously this was handled by filterTableNames. But it can't be anymore because we can no + // longer depend on a 1-1 mapping between table name and entry in the list. + /** + * Filter a list of TableMeta objects. 
+ * @param tableMetas list of TableMetas to filter + * @return filtered table metas + * @throws MetaException something went wrong + */ + List filterTableMetas(List tableMetas) throws MetaException; /** * filter to given table object if applicable @@ -67,28 +101,28 @@ * @return the same table if it's not filtered out * @throws NoSuchObjectException */ - public Table filterTable(Table table) throws MetaException, NoSuchObjectException; + Table filterTable(Table table) throws MetaException, NoSuchObjectException; /** * Filter given list of tables * @param tableList * @return List of filtered table names */ - public List
<Table> filterTables(List<Table> tableList) throws MetaException;
+ List<Table> filterTables(List<Table>
tableList) throws MetaException; /** * Filter given list of partitions * @param partitionList * @return */ - public List filterPartitions(List partitionList) throws MetaException; + List filterPartitions(List partitionList) throws MetaException; /** * Filter given list of partition specs * @param partitionSpecList * @return */ - public List filterPartitionSpecs(List partitionSpecList) + List filterPartitionSpecs(List partitionSpecList) throws MetaException; /** @@ -97,18 +131,17 @@ * @return the same partition object if it's not filtered out * @throws NoSuchObjectException */ - public Partition filterPartition(Partition partition) throws MetaException, NoSuchObjectException; + Partition filterPartition(Partition partition) throws MetaException, NoSuchObjectException; /** * Filter given list of partition names - * @param dbName - * @param tblName - * @param partitionNames - * @return + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partitionNames list of partition names. + * @return list of filtered partition names. */ - public List filterPartitionNames(String dbName, String tblName, + List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException; - - } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java index f5a91b440e..988fca6a6b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java @@ -35,10 +35,12 @@ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; import org.apache.hadoop.hive.metastore.events.DropISchemaEvent; @@ -200,6 +202,10 @@ public void notify(MetaStoreEventListener listener, ListenerEvent event) throws listener.onDropSchemaVersion((DropSchemaVersionEvent) event); } }) + .put(EventType.CREATE_CATALOG, + (listener, event) -> listener.onCreateCatalog((CreateCatalogEvent)event)) + .put(EventType.DROP_CATALOG, + (listener, event) -> listener.onDropCatalog((DropCatalogEvent)event)) .build() ); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 3e1fea9d4f..161a1f3831 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedDbName; +import 
static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;

 import java.io.IOException;
@@ -69,6 +72,7 @@
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -79,6 +83,8 @@
 import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -156,6 +162,7 @@
 import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
 import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.model.MCatalog;
 import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 import org.apache.hadoop.hive.metastore.model.MConstraint;
 import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
@@ -203,6 +210,7 @@
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+import org.apache.thrift.TDeserializer;
 import org.apache.thrift.TException;
 import org.datanucleus.AbstractNucleusContext;
 import org.datanucleus.ClassLoaderResolver;
@@ -792,9 +800,127 @@ public void rollbackTransaction() {
   }

   @Override
+  public void createCatalog(Catalog cat) throws MetaException {
+    LOG.debug("Creating catalog " + cat.getName());
+    boolean committed = false;
+    MCatalog mCat = catToMCat(cat);
+    try {
+      openTransaction();
+      pm.makePersistent(mCat);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public void alterCatalog(String catName, Catalog cat)
+      throws MetaException, InvalidOperationException {
+    if (!cat.getName().equals(catName)) {
+      throw new InvalidOperationException("You cannot change a catalog's name");
+    }
+    boolean committed = false;
+    try {
+      MCatalog mCat = getMCatalog(catName);
+      if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getLocationUri())) {
+        mCat.setLocationUri(cat.getLocationUri());
+      }
+      if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getDescription())) {
+        mCat.setDescription(cat.getDescription());
+      }
+      openTransaction();
+      pm.makePersistent(mCat);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    LOG.debug("Fetching catalog " + catalogName);
+    MCatalog mCat = getMCatalog(catalogName);
+    if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName);
+    return mCatToCat(mCat);
+  }
+
+  @Override
+  public List<String> getCatalogs() throws MetaException {
+    LOG.debug("Fetching all catalog names");
+    boolean commited = false;
+    List<String> catalogs = null;
+
+    String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog";
+    Query query = null;
+
+    openTransaction();
+    try {
+      query = pm.newQuery(queryStr);
+      query.setResult("name");
+      catalogs = new ArrayList<>((Collection<String>) query.execute());
+      commited = commitTransaction();
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+    Collections.sort(catalogs);
+    return catalogs;
+  }
+
+  @Override
+  public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    LOG.debug("Dropping catalog " + catalogName);
+    boolean committed = false;
+    try {
+      openTransaction();
+      MCatalog mCat = getMCatalog(catalogName);
+      pm.retrieve(mCat);
+      if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName);
+      pm.deletePersistent(mCat);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  private MCatalog getMCatalog(String catalogName) throws MetaException {
+    boolean committed = false;
+    Query query = null;
+    try {
+      openTransaction();
+      catalogName = normalizeIdentifier(catalogName);
+      query = pm.newQuery(MCatalog.class, "name == catname");
+      query.declareParameters("java.lang.String catname");
+      query.setUnique(true);
+      MCatalog mCat = (MCatalog)query.execute(catalogName);
+      pm.retrieve(mCat);
+      committed = commitTransaction();
+      return mCat;
+    } finally {
+      rollbackAndCleanup(committed, query);
+    }
+  }
+
+  private MCatalog catToMCat(Catalog cat) {
+    MCatalog mCat = new MCatalog();
+    mCat.setName(normalizeIdentifier(cat.getName()));
+    if (cat.isSetDescription()) mCat.setDescription(cat.getDescription());
+    mCat.setLocationUri(cat.getLocationUri());
+    return mCat;
+  }
+
+  private Catalog mCatToCat(MCatalog mCat) {
+    Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
+    if (mCat.getDescription() != null) cat.setDescription(mCat.getDescription());
+    return cat;
+  }
+
+  @Override
 public void createDatabase(Database db) throws InvalidObjectException, MetaException {
   boolean commited = false;
   MDatabase mdb = new MDatabase();
+  assert db.getCatalogName() != null;
+  mdb.setCatalogName(normalizeIdentifier(db.getCatalogName()));
+  assert mdb.getCatalogName() != null;
   mdb.setName(db.getName().toLowerCase());
   mdb.setLocationUri(db.getLocationUri());
   mdb.setDescription(db.getDescription());
@@ -814,34 +940,35 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep
   }

   @SuppressWarnings("nls")
-  private MDatabase getMDatabase(String name) throws NoSuchObjectException {
+  private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException {
     MDatabase mdb = null;
     boolean commited = false;
     Query query = null;
     try {
       openTransaction();
       name = normalizeIdentifier(name);
-      query = pm.newQuery(MDatabase.class, "name == dbname");
-      query.declareParameters("java.lang.String dbname");
+      catName = normalizeIdentifier(catName);
+      query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
+      query.declareParameters("java.lang.String dbname, java.lang.String catname");
       query.setUnique(true);
-      mdb = (MDatabase) query.execute(name);
+      mdb = (MDatabase) query.execute(name, catName);
       pm.retrieve(mdb);
       commited = commitTransaction();
     } finally {
       rollbackAndCleanup(commited, query);
     }
     if (mdb == null) {
-      throw new NoSuchObjectException("There is no database named " + name);
+      throw new NoSuchObjectException("There is no database " + catName + "."
+ name); } return mdb; } @Override - public Database getDatabase(String name) throws NoSuchObjectException { + public Database getDatabase(String catalogName, String name) throws NoSuchObjectException { MetaException ex = null; Database db = null; try { - db = getDatabaseInternal(name); + db = getDatabaseInternal(catalogName, name); } catch (MetaException e) { // Signature restriction to NSOE, and NSOE being a flat exception prevents us from // setting the cause of the NSOE as the MetaException. We should not lose the info @@ -850,32 +977,34 @@ public Database getDatabase(String name) throws NoSuchObjectException { ex = e; } if (db == null) { - LOG.warn("Failed to get database {}, returning NoSuchObjectException", name, ex); + LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException", + catalogName, name, ex); throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage()))); } return db; } - public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException { - return new GetDbHelper(name, true, true) { + public Database getDatabaseInternal(String catalogName, String name) + throws MetaException, NoSuchObjectException { + return new GetDbHelper(catalogName, name, true, true) { @Override protected Database getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getDatabase(dbName); + return directSql.getDatabase(catalogName, dbName); } @Override protected Database getJdoResult(GetHelper ctx) throws MetaException, NoSuchObjectException { - return getJDODatabase(dbName); + return getJDODatabase(catalogName, dbName); } }.run(false); } - public Database getJDODatabase(String name) throws NoSuchObjectException { + public Database getJDODatabase(String catName, String name) throws NoSuchObjectException { MDatabase mdb = null; boolean commited = false; try { openTransaction(); - mdb = getMDatabase(name); + mdb = getMDatabase(catName, name); commited = commitTransaction(); } finally { if (!commited) { @@ -891,6 +1020,7 @@ public Database getJDODatabase(String name) throws NoSuchObjectException { String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null); PrincipalType principalType = (type == null) ? 
null : PrincipalType.valueOf(type); db.setOwnerType(principalType); + db.setCatalogName(catName); return db; } @@ -903,13 +1033,13 @@ public Database getJDODatabase(String name) throws NoSuchObjectException { * @throws NoSuchObjectException */ @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws MetaException, NoSuchObjectException { MDatabase mdb = null; boolean committed = false; try { - mdb = getMDatabase(dbName); + mdb = getMDatabase(catName, dbName); mdb.setParameters(db.getParameters()); mdb.setOwnerName(db.getOwnerName()); if (db.getOwnerType() != null) { @@ -934,19 +1064,21 @@ public boolean alterDatabase(String dbName, Database db) } @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + public boolean dropDatabase(String catName, String dbname) + throws NoSuchObjectException, MetaException { boolean success = false; - LOG.info("Dropping database {} along with all tables", dbname); + LOG.info("Dropping database {}.{} along with all tables", catName, dbname); dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); QueryWrapper queryWrapper = new QueryWrapper(); try { openTransaction(); // then drop the database - MDatabase db = getMDatabase(dbname); + MDatabase db = getMDatabase(catName, dbname); pm.retrieve(db); if (db != null) { - List dbGrants = this.listDatabaseGrants(dbname, queryWrapper); + List dbGrants = this.listDatabaseGrants(catName, dbname, queryWrapper); if (CollectionUtils.isNotEmpty(dbGrants)) { pm.deletePersistentAll(dbGrants); } @@ -960,9 +1092,9 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { if (pattern == null || pattern.equals("*")) { - return getAllDatabases(); + return getAllDatabases(catName); } boolean commited = false; List databases = null; @@ -974,6 +1106,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc String[] subpatterns = pattern.trim().split("\\|"); StringBuilder filterBuilder = new StringBuilder(); List parameterVals = new ArrayList<>(subpatterns.length); + appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals); appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals); query = pm.newQuery(MDatabase.class, filterBuilder.toString()); query.setResult("name"); @@ -988,18 +1121,20 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { boolean commited = false; List databases = null; - String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MDatabase"; Query query = null; + catName = normalizeIdentifier(catName); openTransaction(); try { - query = pm.newQuery(queryStr); + query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " + + "where catalogName == catname"); + query.declareParameters("java.lang.String catname"); query.setResult("name"); - databases = new ArrayList<>((Collection) query.execute()); + databases = new ArrayList<>((Collection) query.execute(catName)); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -1112,12 +1247,25 @@ public boolean 
dropType(String typeName) { // Add constraints. // We need not do a deep retrieval of the Table Column Descriptor while persisting the // constraints since this transaction involving create table is not yet committed. - List constraintNames = addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints); - constraintNames.addAll(addPrimaryKeys(primaryKeys, false)); - constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); - constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); - constraintNames.addAll(addDefaultConstraints(defaultConstraints, false)); - constraintNames.addAll(addCheckConstraints(checkConstraints, false)); + List constraintNames = new ArrayList<>(); + if (foreignKeys != null) { + constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints)); + } + if (primaryKeys != null) { + constraintNames.addAll(addPrimaryKeys(primaryKeys, false)); + } + if (uniqueConstraints != null) { + constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); + } + if (notNullConstraints != null) { + constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); + } + if (defaultConstraints != null) { + constraintNames.addAll(addDefaultConstraints(defaultConstraints, false)); + } + if (checkConstraints != null) { + constraintNames.addAll(addCheckConstraints(checkConstraints, false)); + } success = commitTransaction(); return constraintNames; } finally { @@ -1205,47 +1353,47 @@ private void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObj } @Override - public boolean dropTable(String dbName, String tableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { + public boolean dropTable(String catName, String dbName, String tableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean materializedView = false; boolean success = false; try { openTransaction(); - MTable tbl = getMTable(dbName, tableName); + MTable tbl = getMTable(catName, dbName, tableName); pm.retrieve(tbl); if (tbl != null) { materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType()); // first remove all the grants - List tabGrants = listAllTableGrants(dbName, tableName); + List tabGrants = listAllTableGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(tabGrants)) { pm.deletePersistentAll(tabGrants); } - List tblColGrants = listTableAllColumnGrants(dbName, + List tblColGrants = listTableAllColumnGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(tblColGrants)) { pm.deletePersistentAll(tblColGrants); } - List partGrants = this.listTableAllPartitionGrants(dbName, tableName); + List partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(partGrants)) { pm.deletePersistentAll(partGrants); } - List partColGrants = listTableAllPartitionColumnGrants(dbName, + List partColGrants = listTableAllPartitionColumnGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(partColGrants)) { pm.deletePersistentAll(partColGrants); } // delete column statistics if present try { - deleteTableColumnStatistics(dbName, tableName, null); + deleteTableColumnStatistics(catName, dbName, tableName, null); } catch (NoSuchObjectException e) { - LOG.info("Found no table level column statistics associated with db {}" + - " table {} record to delete", dbName, tableName); + LOG.info("Found no table level column statistics associated with {} to delete", + 
getCatalogQualifiedTableName(catName, dbName, tableName)); } List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( - dbName, tableName, null); + catName, dbName, tableName, null); if (CollectionUtils.isNotEmpty(tabConstraints)) { pm.deletePersistentAll(tabConstraints); } @@ -1253,7 +1401,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, preDropStorageDescriptor(tbl.getSd()); if (materializedView) { - dropCreationMetadata( + dropCreationMetadata(tbl.getDatabase().getCatalogName(), tbl.getDatabase().getName(), tbl.getTableName()); } @@ -1273,12 +1421,12 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, return success; } - private boolean dropCreationMetadata(String dbName, String tableName) throws MetaException, + private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MCreationMetadata mcm = getCreationMetadata(dbName, tableName); + MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName); pm.retrieve(mcm); if (mcm != null) { pm.deletePersistentAll(mcm); @@ -1292,8 +1440,9 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met return success; } - private List listAllTableConstraintsWithOptionalConstraintName - (String dbName, String tableName, String constraintname) { + private List listAllTableConstraintsWithOptionalConstraintName( + String catName, String dbName, String tableName, String constraintname) { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); constraintname = constraintname!=null?normalizeIdentifier(constraintname):null; @@ -1303,19 +1452,21 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met try { query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where " - + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname) || " - + "(childTable != null && childTable.tableName == ctblname && " - + "childTable.database.name == cdbname)) " + (constraintname != null ? - " && constraintName == constraintname" : "")); + + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " + + "parentTable.database.catalogName == pcatname) || " + + "(childTable != null && childTable.tableName == ctblname &&" + + "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " + + (constraintname != null ? " && constraintName == constraintname" : "")); query.declareParameters("java.lang.String ptblname, java.lang.String pdbname," - + "java.lang.String ctblname, java.lang.String cdbname" + + + "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," + + "java.lang.String ccatname" + (constraintname != null ? ", java.lang.String constraintname" : "")); Collection constraintNamesColl = constraintname != null ? ((Collection) query. - executeWithArray(tableName, dbName, tableName, dbName, constraintname)): + executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)): ((Collection) query. 
- executeWithArray(tableName, dbName, tableName, dbName)); + executeWithArray(tableName, dbName, catName, tableName, dbName, catName)); for (Iterator i = constraintNamesColl.iterator(); i.hasNext();) { String currName = (String) i.next(); constraintNames.add(currName); @@ -1338,16 +1489,16 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met } @Override - public Table getTable(String dbName, String tableName) throws MetaException { + public Table getTable(String catName, String dbName, String tableName) throws MetaException { boolean commited = false; Table tbl = null; try { openTransaction(); - tbl = convertToTable(getMTable(dbName, tableName)); + tbl = convertToTable(getMTable(catName, dbName, tableName)); // Retrieve creation metadata if needed if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { tbl.setCreationMetadata( - convertToCreationMetadata(getCreationMetadata(dbName, tableName))); + convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName))); } commited = commitTransaction(); } finally { @@ -1359,40 +1510,46 @@ public Table getTable(String dbName, String tableName) throws MetaException { } @Override - public List getTables(String dbName, String pattern) throws MetaException { - return getTables(dbName, pattern, null); + public List getTables(String catName, String dbName, String pattern) + throws MetaException { + return getTables(catName, dbName, pattern, null); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { + public List getTables(String catName, String dbName, String pattern, TableType tableType) + throws MetaException { try { // We only support pattern matching via jdo since pattern matching in Java // might be different than the one used by the metastore backends - return getTablesInternal(dbName, pattern, tableType, (pattern == null || pattern.equals(".*")), true); + return getTablesInternal(catName, dbName, pattern, tableType, + (pattern == null || pattern.equals(".*")), true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getTablesInternal(String dbName, String pattern, TableType tableType, - boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + protected List getTablesInternal(String catName, String dbName, String pattern, + TableType tableType, boolean allowSql, boolean allowJdo) + throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(dbName); - return new GetListHelper(dbName, null, allowSql, allowJdo) { + final String cat_name = normalizeIdentifier(catName); + return new GetListHelper(cat_name, dbName, null, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getTables(db_name, tableType); + return directSql.getTables(cat_name, db_name, tableType); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getTablesInternalViaJdo(db_name, pattern, tableType); + return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType); } }.run(false); } - private List getTablesInternalViaJdo(String dbName, String pattern, TableType tableType) throws MetaException { + private List getTablesInternalViaJdo(String catName, String dbName, String pattern, + TableType tableType) throws MetaException { boolean commited = false; Query query = null; List tbls = null; @@ -1405,6 +1562,7 @@ 
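// The table listings above funnel through the dual-path helper: direct SQL when
// available, ORM as the fallback, both now catalog-qualified. A condensed sketch
// of getTablesInternal's shape (identifiers as in the surrounding code; a sketch,
// not a drop-in replacement):
return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
  @Override
  protected List<String> getSqlResult(GetHelper<List<String>> ctx) throws MetaException {
    // fast path: catalog-qualified direct SQL
    return directSql.getTables(cat_name, db_name, tableType);
  }
  @Override
  protected List<String> getJdoResult(GetHelper<List<String>> ctx)
      throws MetaException, NoSuchObjectException {
    // fallback: JDO filter on database.name && database.catalogName
    return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
  }
}.run(false);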
public Table getTable(String dbName, String tableName) throws MetaException { StringBuilder filterBuilder = new StringBuilder(); //adds database.name == dbName to the filter appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if(pattern != null) { appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals); } @@ -1425,21 +1583,23 @@ public Table getTable(String dbName, String tableName) throws MetaException { } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean commited = false; Query query = null; List tbls = null; try { openTransaction(); dbName = normalizeIdentifier(dbName); - query = pm.newQuery(MTable.class, "database.name == db && tableType == tt" - + " && rewriteEnabled == re"); - query.declareParameters("java.lang.String db, java.lang.String tt, boolean re"); + query = pm.newQuery(MTable.class, + "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re"); + query.declareParameters( + "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re"); query.setResult("tableName"); - Collection names = (Collection) query.execute( - db_name, TableType.MATERIALIZED_VIEW.toString(), true); + Collection names = (Collection) query.executeWithArray( + db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true); tbls = new ArrayList<>(names); commited = commitTransaction(); } finally { @@ -1481,8 +1641,8 @@ private int getObjectCount(String fieldName, String objName) { } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) - throws MetaException { + public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException { boolean commited = false; Query query = null; @@ -1493,6 +1653,7 @@ private int getObjectCount(String fieldName, String objName) { // patterns StringBuilder filterBuilder = new StringBuilder(); List parameterVals = new ArrayList<>(); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if (dbNames != null && !dbNames.equals("*")) { appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals); } @@ -1503,6 +1664,10 @@ private int getObjectCount(String fieldName, String objName) { appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals); } + if (LOG.isDebugEnabled()) { + LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " + + StringUtils.join(parameterVals, ", ")); + } query = pm.newQuery(MTable.class, filterBuilder.toString()); Collection tables = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); for (MTable table : tables) { @@ -1561,8 +1726,8 @@ private StringBuilder appendCondition(StringBuilder builder, } @Override - public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, ".*"); + public List getAllTables(String catName, String dbName) throws MetaException { + return getTables(catName, dbName, ".*"); } class AttachedMTableInfo { @@ -1577,19 +1742,25 @@ public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) { } } - private 
AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD) { + private AttachedMTableInfo getMTable(String catName, String db, String table, + boolean retrieveCD) { AttachedMTableInfo nmtbl = new AttachedMTableInfo(); MTable mtbl = null; boolean commited = false; Query query = null; try { openTransaction(); + catName = normalizeIdentifier(catName); db = normalizeIdentifier(db); table = normalizeIdentifier(table); - query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); + query = pm.newQuery(MTable.class, + "tableName == table && database.name == db && database.catalogName == catname"); + query.declareParameters( + "java.lang.String table, java.lang.String db, java.lang.String catname"); query.setUnique(true); - mtbl = (MTable) query.execute(table, db); + LOG.debug("Executing getMTable for " + + getCatalogQualifiedTableName(catName, db, table)); + mtbl = (MTable) query.execute(table, db, catName); pm.retrieve(mtbl); // Retrieving CD can be expensive and unnecessary, so do it only when required. if (mtbl != null && retrieveCD) { @@ -1605,17 +1776,17 @@ private AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD return nmtbl; } - private MCreationMetadata getCreationMetadata(String dbName, String tblName) { + private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) { boolean commited = false; MCreationMetadata mcm = null; Query query = null; try { openTransaction(); query = pm.newQuery( - MCreationMetadata.class, "tblName == table && dbName == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); + MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat"); + query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat"); query.setUnique(true); - mcm = (MCreationMetadata) query.execute(tblName, dbName); + mcm = (MCreationMetadata) query.execute(tblName, dbName, catName); pm.retrieve(mcm); commited = commitTransaction(); } finally { @@ -1624,14 +1795,14 @@ private MCreationMetadata getCreationMetadata(String dbName, String tblName) { return mcm; } - private MTable getMTable(String db, String table) { - AttachedMTableInfo nmtbl = getMTable(db, table, false); + private MTable getMTable(String catName, String db, String table) { + AttachedMTableInfo nmtbl = getMTable(catName, db, table, false); return nmtbl.mtbl; } @Override - public List
<Table> getTableObjectsByName(String db, List<String> tbl_names) throws MetaException, - UnknownDBException { + public List
<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names) + throws MetaException, UnknownDBException { List<Table>
tables = new ArrayList<>(); boolean committed = false; Query dbExistsQuery = null; @@ -1639,25 +1810,31 @@ private MTable getMTable(String db, String table) { try { openTransaction(); db = normalizeIdentifier(db); - dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); - dbExistsQuery.declareParameters("java.lang.String db"); - dbExistsQuery.setUnique(true); - dbExistsQuery.setResult("name"); - String dbNameIfExists = (String) dbExistsQuery.execute(db); - if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) { - throw new UnknownDBException("Could not find database " + db); - } + catName = normalizeIdentifier(catName); List lowered_tbl_names = new ArrayList<>(tbl_names.size()); for (String t : tbl_names) { lowered_tbl_names.add(normalizeIdentifier(t)); } query = pm.newQuery(MTable.class); - query.setFilter("database.name == db && tbl_names.contains(tableName)"); - query.declareParameters("java.lang.String db, java.util.Collection tbl_names"); - Collection mtables = (Collection) query.execute(db, lowered_tbl_names); - for (Iterator iter = mtables.iterator(); iter.hasNext();) { - tables.add(convertToTable((MTable) iter.next())); + query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)"); + query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names"); + Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names); + if (mtables == null || mtables.isEmpty()) { + // Need to differentiate between an unmatched pattern and a non-existent database + dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat"); + dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat"); + dbExistsQuery.setUnique(true); + dbExistsQuery.setResult("name"); + String dbNameIfExists = (String) dbExistsQuery.execute(db, catName); + if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) { + throw new UnknownDBException("Could not find database " + + getCatalogQualifiedDbName(catName, db)); + } + } else { + for (Iterator iter = mtables.iterator(); iter.hasNext(); ) { + tables.add(convertToTable((MTable) iter.next())); + } } committed = commitTransaction(); } finally { @@ -1701,6 +1878,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); t.setRewriteEnabled(mtbl.isRewriteEnabled()); + t.setCatName(mtbl.getDatabase().getCatalogName()); return t; } @@ -1710,12 +1888,13 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, return null; } MDatabase mdb = null; + String catName = tbl.isSetCatName() ? 
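/* tables written before catalogs existed carry no catName field; fall back to
   the configured default catalog instead of failing the conversion */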
tbl.getCatName() : getDefaultCatalog(conf); try { - mdb = getMDatabase(tbl.getDbName()); + mdb = getMDatabase(catName, tbl.getDbName()); } catch (NoSuchObjectException e) { LOG.error("Could not convert to MTable", e); - throw new InvalidObjectException("Database " + tbl.getDbName() - + " doesn't exist."); + throw new InvalidObjectException("Database " + + getCatalogQualifiedDbName(catName, tbl.getDbName()) + " doesn't exist."); } // If the table has property EXTERNAL set, update table type @@ -1963,9 +2142,9 @@ private MCreationMetadata convertToMCreationMetadata( Set tablesUsed = new HashSet<>(); for (String fullyQualifiedName : m.getTablesUsed()) { String[] names = fullyQualifiedName.split("\\."); - tablesUsed.add(getMTable(names[0], names[1], false).mtbl); + tablesUsed.add(getMTable(m.getCatName(), names[0], names[1], false).mtbl); } - return new MCreationMetadata(m.getDbName(), m.getTblName(), + return new MCreationMetadata(m.getCatName(), m.getDbName(), m.getTblName(), tablesUsed, m.getValidTxnList()); } @@ -1980,7 +2159,7 @@ private CreationMetadata convertToCreationMetadata( Warehouse.getQualifiedName( mtbl.getDatabase().getName(), mtbl.getTableName())); } - CreationMetadata r = new CreationMetadata( + CreationMetadata r = new CreationMetadata(s.getCatalogName(), s.getDbName(), s.getTblName(), tablesUsed); if (s.getTxnList() != null) { r.setValidTxnList(s.getTxnList()); @@ -1989,17 +2168,17 @@ private CreationMetadata convertToCreationMetadata( } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { boolean success = false; openTransaction(); try { List tabGrants = null; List tabColumnGrants = null; - MTable table = this.getMTable(dbName, tblName); + MTable table = this.getMTable(catName, dbName, tblName); if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(dbName, tblName); - tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName); + tabGrants = this.listAllTableGrants(catName, dbName, tblName); + tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName); } List toPersist = new ArrayList<>(); for (Partition part : parts) { @@ -2044,7 +2223,7 @@ private boolean isValidPartition( Partition part, boolean ifNotExists) throws MetaException { MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); - boolean doesExist = doesPartitionExist( + boolean doesExist = doesPartitionExist(part.getCatName(), part.getDbName(), part.getTableName(), part.getValues()); if (doesExist && !ifNotExists) { throw new MetaException("Partition already exists: " + part); @@ -2053,7 +2232,7 @@ private boolean isValidPartition( } @Override - public boolean addPartitions(String dbName, String tblName, + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { boolean success = false; @@ -2061,10 +2240,10 @@ public boolean addPartitions(String dbName, String tblName, try { List tabGrants = null; List tabColumnGrants = null; - MTable table = this.getMTable(dbName, tblName); + MTable table = this.getMTable(catName, dbName, tblName); if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(dbName, tblName); - tabColumnGrants = 
this.listTableAllColumnGrants(dbName, tblName); + tabGrants = this.listAllTableGrants(catName, dbName, tblName); + tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName); } if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) { @@ -2115,14 +2294,14 @@ public boolean addPartition(Partition part) throws InvalidObjectException, boolean success = false; boolean commited = false; try { - MTable table = this.getMTable(part.getDbName(), part.getTableName()); + String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf); + MTable table = this.getMTable(catName, part.getDbName(), part.getTableName()); List tabGrants = null; List tabColumnGrants = null; if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(part - .getDbName(), part.getTableName()); + tabGrants = this.listAllTableGrants(catName, part.getDbName(), part.getTableName()); tabColumnGrants = this.listTableAllColumnGrants( - part.getDbName(), part.getTableName()); + catName, part.getDbName(), part.getTableName()); } openTransaction(); MPartition mpart = convertToMPart(part, true); @@ -2165,10 +2344,10 @@ public boolean addPartition(Partition part) throws InvalidObjectException, } @Override - public Partition getPartition(String dbName, String tableName, + public Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws NoSuchObjectException, MetaException { openTransaction(); - Partition part = convertToPart(getMPartition(dbName, tableName, part_vals)); + Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals)); commitTransaction(); if(part == null) { throw new NoSuchObjectException("partition values=" @@ -2178,7 +2357,7 @@ public Partition getPartition(String dbName, String tableName, return part; } - private MPartition getMPartition(String dbName, String tableName, List part_vals) + private MPartition getMPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException { List mparts = null; MPartition ret = null; @@ -2186,9 +2365,10 @@ private MPartition getMPartition(String dbName, String tableName, List p Query query = null; try { openTransaction(); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - MTable mtbl = getMTable(dbName, tableName); + MTable mtbl = getMTable(catName, dbName, tableName); if (mtbl == null) { commited = commitTransaction(); return null; @@ -2199,9 +2379,11 @@ private MPartition getMPartition(String dbName, String tableName, List p Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - mparts = (List) query.execute(tableName, dbName, name); + "table.tableName == t1 && table.database.name == t2 && partitionName == t3 " + + " && table.database.catalogName == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + mparts = (List) query.executeWithArray(tableName, dbName, name, catName); pm.retrieveAll(mparts); commited = commitTransaction(); // We need to compare partition name with requested name since some DBs @@ -2244,7 +2426,7 @@ private MPartition convertToMPart(Partition part, boolean 
useTableCD) if (part == null) { return null; } - MTable mt = getMTable(part.getDbName(), part.getTableName()); + MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName()); if (mt == null) { throw new InvalidObjectException( "Partition doesn't have a valid table or database name"); @@ -2275,30 +2457,34 @@ private Partition convertToPart(MPartition mpart) throws MetaException { if (mpart == null) { return null; } - return new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() + Partition p = new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), convertMap(mpart.getParameters())); + p.setCatName(mpart.getTable().getDatabase().getCatalogName()); + return p; } - private Partition convertToPart(String dbName, String tblName, MPartition mpart) + private Partition convertToPart(String catName, String dbName, String tblName, MPartition mpart) throws MetaException { if (mpart == null) { return null; } - return new Partition(convertList(mpart.getValues()), dbName, tblName, + Partition p = new Partition(convertList(mpart.getValues()), dbName, tblName, mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters())); + p.setCatName(catName); + return p; } @Override - public boolean dropPartition(String dbName, String tableName, + public boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MPartition part = getMPartition(dbName, tableName, part_vals); + MPartition part = getMPartition(catName, dbName, tableName, part_vals); dropPartitionCommon(part); success = commitTransaction(); } finally { @@ -2310,7 +2496,7 @@ public boolean dropPartition(String dbName, String tableName, } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { if (CollectionUtils.isEmpty(partNames)) { return; @@ -2319,15 +2505,15 @@ public void dropPartitions(String dbName, String tblName, List partNames openTransaction(); try { // Delete all things. - dropPartitionGrantsNoTxn(dbName, tblName, partNames); - dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames); - dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames); + dropPartitionGrantsNoTxn(catName, dbName, tblName, partNames); + dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, partNames); + dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, partNames); // CDs are reused; go thry partition SDs, detach all CDs from SDs, then remove unused CDs. - for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) { + for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, partNames)) { removeUnusedColumnDescriptor(mcd); } - dropPartitionsNoTxn(dbName, tblName, partNames); + dropPartitionsNoTxn(catName, dbName, tblName, partNames); if (!(success = commitTransaction())) { throw new MetaException("Failed to drop partitions"); // Should not happen? 
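// The cascade above runs in a deliberate order: partition grants and partition
// column grants first, then partition column statistics, then column descriptors
// are detached from the partition SDs (and removed only when no other SD still
// references them), and finally the MPartition rows themselves. Every step is
// now keyed by (catName, dbName, tblName).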
} @@ -2341,12 +2527,6 @@ public void dropPartitions(String dbName, String tblName, List partNames /** * Drop an MPartition and cascade deletes (e.g., delete partition privilege grants, * drop the storage descriptor cleanly, etc.) - * @param part - the MPartition to drop - * @return whether the transaction committed successfully - * @throws InvalidInputException - * @throws InvalidObjectException - * @throws MetaException - * @throws NoSuchObjectException */ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -2362,6 +2542,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio String partName = FileUtils.makePartName(colNames, part.getValues()); List partGrants = listPartitionGrants( + part.getTable().getDatabase().getCatalogName(), part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName)); @@ -2371,6 +2552,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } List partColumnGrants = listPartitionAllColumnGrants( + part.getTable().getDatabase().getCatalogName(), part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName)); @@ -2378,12 +2560,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio pm.deletePersistentAll(partColumnGrants); } + String catName = part.getTable().getDatabase().getCatalogName(); String dbName = part.getTable().getDatabase().getName(); String tableName = part.getTable().getTableName(); // delete partition level column stats if it exists try { - deletePartitionColumnStatistics(dbName, tableName, partName, part.getValues(), null); + deletePartitionColumnStatistics(catName, dbName, tableName, partName, part.getValues(), null); } catch (NoSuchObjectException e) { LOG.info("No column statistics records found to delete"); } @@ -2401,26 +2584,26 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public List getPartitions( - String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsInternal(dbName, tableName, maxParts, true, true); + public List getPartitions(String catName, String dbName, String tableName, + int maxParts) throws MetaException, NoSuchObjectException { + return getPartitionsInternal(catName, dbName, tableName, maxParts, true, true); } - protected List getPartitionsInternal( - String dbName, String tblName, final int maxParts, boolean allowSql, boolean allowJdo) + protected List getPartitionsInternal(String catName, String dbName, String tblName, + final int maxParts, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { Integer max = (maxParts < 0) ? 
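/* a negative maxParts means "no limit": hand direct SQL a null row bound */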
null : maxParts; - return directSql.getPartitions(dbName, tblName, max); + return directSql.getPartitions(catName, dbName, tblName, max); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException { QueryWrapper queryWrapper = new QueryWrapper(); try { - return convertToParts(listMPartitions(dbName, tblName, maxParts, queryWrapper)); + return convertToParts(listMPartitions(catName, dbName, tblName, maxParts, queryWrapper)); } finally { queryWrapper.close(); } @@ -2429,7 +2612,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short max, String userName, List groupNames) throws MetaException, InvalidObjectException { boolean success = false; @@ -2437,7 +2620,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio try { openTransaction(); - List mparts = listMPartitions(dbName, tblName, max, queryWrapper); + List mparts = listMPartitions(catName, dbName, tblName, max, queryWrapper); List parts = new ArrayList<>(mparts.size()); if (CollectionUtils.isNotEmpty(mparts)) { for (MPartition mpart : mparts) { @@ -2448,7 +2631,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, + PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(partAuth); } @@ -2462,13 +2645,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws NoSuchObjectException, MetaException, InvalidObjectException { boolean success = false; try { openTransaction(); - MPartition mpart = getMPartition(dbName, tblName, partVals); + MPartition mpart = getMPartition(catName, dbName, tblName, partVals); if (mpart == null) { commitTransaction(); throw new NoSuchObjectException("partition values=" @@ -2480,7 +2663,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), partVals); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, + PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, user_name, group_names); part.setPrivileges(partAuth); } @@ -2513,11 +2696,11 @@ public Partition getPartitionWithAuth(String dbName, String tblName, return dest; } - private List convertToParts(String dbName, String tblName, List mparts) + private List convertToParts(String catName, String dbName, String tblName, List mparts) throws MetaException { List parts = new ArrayList<>(mparts.size()); for (MPartition mp : mparts) { - parts.add(convertToPart(dbName, tblName, mp)); + parts.add(convertToPart(catName, dbName, tblName, mp)); Deadline.checkTimeout(); } return parts; @@ -2525,14 +2708,14 @@ public Partition 
getPartitionWithAuth(String dbName, String tblName, // TODO:pc implement max @Override - public List listPartitionNames(String dbName, String tableName, + public List listPartitionNames(String catName, String dbName, String tableName, short max) throws MetaException { List pns = null; boolean success = false; try { openTransaction(); LOG.debug("Executing getPartitionNames"); - pns = getPartitionNamesNoTxn(dbName, tableName, max); + pns = getPartitionNamesNoTxn(catName, dbName, tableName, max); success = commitTransaction(); } finally { if (!success) { @@ -2582,21 +2765,24 @@ private String extractPartitionKey(FieldSchema key, List pkeys) { } @Override - public PartitionValuesResponse listPartitionValues(String dbName, String tableName, List cols, - boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String dbName, + String tableName, List cols, + boolean applyDistinct, String filter, + boolean ascending, List order, + long maxParts) throws MetaException { + catName = normalizeIdentifier(catName); dbName = dbName.toLowerCase().trim(); tableName = tableName.toLowerCase().trim(); try { if (filter == null || filter.isEmpty()) { - PartitionValuesResponse response = - getDistinctValuesForPartitionsNoTxn(dbName, tableName, cols, applyDistinct, ascending, maxParts); + PartitionValuesResponse response = getDistinctValuesForPartitionsNoTxn(catName, dbName, + tableName, cols, applyDistinct, maxParts); LOG.info("Number of records fetched: {}", response.getPartitionValues().size()); return response; } else { PartitionValuesResponse response = - extractPartitionNamesByFilter(dbName, tableName, filter, cols, ascending, applyDistinct, maxParts); + extractPartitionNamesByFilter(catName, dbName, tableName, filter, cols, ascending, maxParts); if (response != null && response.getPartitionValues() != null) { LOG.info("Number of records fetched with filter: {}", response.getPartitionValues().size()); } @@ -2605,31 +2791,33 @@ public PartitionValuesResponse listPartitionValues(String dbName, String tableNa } catch (Exception t) { LOG.error("Exception in ORM", t); throw new MetaException("Error retrieving partition values: " + t); - } finally { } } - private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, String tableName, String filter, - List cols, boolean ascending, boolean applyDistinct, long maxParts) + private PartitionValuesResponse extractPartitionNamesByFilter( + String catName, String dbName, String tableName, String filter, List cols, + boolean ascending, long maxParts) throws MetaException, NoSuchObjectException { - LOG.info("Database: {} Table: {} filter: \"{}\" cols: {}", dbName, tableName, filter, cols); + LOG.info("Table: {} filter: \"{}\" cols: {}", + getCatalogQualifiedTableName(catName, dbName, tableName), filter, cols); List partitionNames = null; List partitions = null; - Table tbl = getTable(dbName, tableName); + Table tbl = getTable(catName, dbName, tableName); try { // Get partitions by name - ascending or descending - partitionNames = getPartitionNamesByFilter(dbName, tableName, filter, ascending, maxParts); + partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending, + maxParts); } catch (MetaException e) { LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter); } if (partitionNames == null) { - partitions = getPartitionsByFilter(dbName, tableName, filter, (short) 
maxParts); + partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts); } if (partitions != null) { - partitionNames = new ArrayList(partitions.size()); + partitionNames = new ArrayList<>(partitions.size()); for (Partition partition : partitions) { // Check for NULL's just to be safe if (tbl.getPartitionKeys() != null && partition.getValues() != null) { @@ -2640,7 +2828,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str if (partitionNames == null && partitions == null) { throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter + - "\" for " + dbName + ":" + tableName); + "\" for " + getCatalogQualifiedTableName(catName, dbName, tableName)); } if (!ascending) { @@ -2649,7 +2837,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str // Return proper response PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList(partitionNames.size())); + response.setPartitionValues(new ArrayList<>(partitionNames.size())); LOG.info("Converting responses to Partition values for items: {}", partitionNames.size()); for (String partName : partitionNames) { ArrayList vals = new ArrayList(Collections.nCopies(tbl.getPartitionKeys().size(), null)); @@ -2663,26 +2851,27 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str return response; } - private List getPartitionNamesByFilter(String dbName, String tableName, + private List getPartitionNamesByFilter(String catName, String dbName, String tableName, String filter, boolean ascending, long maxParts) throws MetaException { boolean success = false; - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing getPartitionNamesByFilter"); + catName = normalizeIdentifier(catName); dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); - MTable mtable = getMTable(dbName, tableName); + MTable mtable = getMTable(catName, dbName, tableName); if( mtable == null ) { // To be consistent with the behavior of listPartitionNames, if the // table or db does not exist, we return an empty list return partNames; } - Map params = new HashMap(); - String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); + Map params = new HashMap<>(); + String queryFilterString = makeQueryFilterString(catName, dbName, mtable, filter, params); Query query = pm.newQuery( "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + "where " + queryFilterString); @@ -2721,15 +2910,16 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str return partNames; } - private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbName, String tableName, List cols, - boolean applyDistinct, boolean ascending, long maxParts) + private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn( + String catName, String dbName, String tableName, List cols, + boolean applyDistinct, long maxParts) throws MetaException { - try { openTransaction(); Query q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 "); - q.declareParameters("java.lang.String t1, java.lang.String t2"); + + "where table.database.name == t1 && table.database.catalogName == t2 && " + + "table.tableName == t3 "); + q.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String 
t3"); // TODO: Ordering seems to affect the distinctness, needs checking, disabling. /* @@ -2746,7 +2936,7 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam if (applyDistinct) { partValuesSelect.append("DISTINCT "); } - List partitionKeys = getTable(dbName, tableName).getPartitionKeys(); + List partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys(); for (FieldSchema key : cols) { partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", "); } @@ -2755,9 +2945,9 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam q.setResult(partValuesSelect.toString()); PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList()); + response.setPartitionValues(new ArrayList<>()); if (cols.size() > 1) { - List results = (List) q.execute(dbName, tableName); + List results = (List) q.execute(dbName, catName, tableName); for (Object[] row : results) { PartitionValuesRow rowResponse = new PartitionValuesRow(); for (Object columnValue : row) { @@ -2766,7 +2956,7 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam response.addToPartitionValues(rowResponse); } } else { - List results = (List) q.execute(dbName, tableName); + List results = (List) q.execute(dbName, catName, tableName); for (Object row : results) { PartitionValuesRow rowResponse = new PartitionValuesRow(); rowResponse.addToRow((String) row); @@ -2780,24 +2970,25 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam } } - private List getPartitionNamesNoTxn(String dbName, String tableName, short max) { + private List getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) { List pns = new ArrayList<>(); if (max == 0) { return pns; } + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); Query query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 " + + "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 " + "order by partitionName asc"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setResult("partitionName"); if (max > 0) { query.setRange(0, max); } - Collection names = (Collection) query.execute(dbName, tableName); + Collection names = (Collection) query.execute(dbName, tableName, catName); pns.addAll(names); if (query != null) { @@ -2821,14 +3012,16 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam * you want results for. E.g., if resultsCol is partitionName, the Collection * has types of String, and if resultsCol is null, the types are MPartition. */ - private Collection getPartitionPsQueryResults(String dbName, String tableName, + private Collection getPartitionPsQueryResults(String catName, String dbName, String tableName, List part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - Table table = getTable(dbName, tableName); + Table table = getTable(catName, dbName, tableName); if (table == null) { - throw new NoSuchObjectException(dbName + "." 
+ tableName + " table not found"); + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tableName) + + " table not found"); } List partCols = table.getPartitionKeys(); int numPartKeys = partCols.size(); @@ -2849,10 +3042,11 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } Query query = queryWrapper.query = pm.newQuery(MPartition.class); StringBuilder queryFilter = new StringBuilder("table.database.name == dbName"); + queryFilter.append(" && table.database.catalogName == catName"); queryFilter.append(" && table.tableName == tableName"); queryFilter.append(" && partitionName.matches(partialRegex)"); query.setFilter(queryFilter.toString()); - query.declareParameters("java.lang.String dbName, " + query.declareParameters("java.lang.String dbName, java.lang.String catName, " + "java.lang.String tableName, java.lang.String partialRegex"); if (max_parts >= 0) { // User specified a row limit, set it on the Query @@ -2862,11 +3056,11 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, query.setResult(resultsCol); } - return (Collection) query.execute(dbName, tableName, partNameMatcher); + return (Collection) query.executeWithArray(dbName, catName, tableName, partNameMatcher); } @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, + public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { List partitions = new ArrayList<>(); @@ -2876,9 +3070,9 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, try { openTransaction(); LOG.debug("executing listPartitionNamesPsWithAuth"); - Collection parts = getPartitionPsQueryResults(db_name, tbl_name, + Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, part_vals, max_parts, null, queryWrapper); - MTable mtbl = getMTable(db_name, tbl_name); + MTable mtbl = getMTable(catName, db_name, tbl_name); for (Object o : parts) { Partition part = convertToPart((MPartition) o); //set auth privileges @@ -2886,7 +3080,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(db_name, + PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name, tbl_name, partName, userName, groupNames); part.setPrivileges(partAuth); } @@ -2900,7 +3094,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } @Override - public List listPartitionNamesPs(String dbName, String tableName, + public List listPartitionNamesPs(String catName, String dbName, String tableName, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { List partitionNames = new ArrayList<>(); boolean success = false; @@ -2909,7 +3103,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, try { openTransaction(); LOG.debug("Executing listPartitionNamesPs"); - Collection names = getPartitionPsQueryResults(dbName, tableName, + Collection names = getPartitionPsQueryResults(catName, dbName, tableName, part_vals, max_parts, "partitionName", queryWrapper); partitionNames.addAll(names); success = commitTransaction(); @@ -2920,7 
+3114,8 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } // TODO:pc implement max - private List listMPartitions(String dbName, String tableName, int max, QueryWrapper queryWrapper) { + private List listMPartitions(String catName, String dbName, String tableName, + int max, QueryWrapper queryWrapper) { boolean success = false; List mparts = null; try { @@ -2928,13 +3123,14 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, LOG.debug("Executing listMPartitions"); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + Query query = queryWrapper.query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setOrdering("partitionName ascending"); if (max > 0) { query.setRange(0, max); } - mparts = (List) query.execute(tableName, dbName); + mparts = (List) query.execute(tableName, dbName, catName); LOG.debug("Done executing query for listMPartitions"); pm.retrieveAll(mparts); success = commitTransaction(); @@ -2948,41 +3144,43 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - return getPartitionsByNamesInternal(dbName, tblName, partNames, true, true); + return getPartitionsByNamesInternal(catName, dbName, tblName, partNames, true, true); } - protected List getPartitionsByNamesInternal(String dbName, String tblName, - final List partNames, boolean allowSql, boolean allowJdo) + protected List getPartitionsByNamesInternal(String catName, String dbName, + String tblName, + final List partNames, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); + return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(dbName, tblName, partNames); + return getPartitionsViaOrmFilter(catName, dbName, tblName, partNames); } }.run(false); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { return getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); } - protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr, + protected boolean getPartitionsByExprInternal(String catName, String dbName, String tblName, final byte[] expr, final String defaultPartitionName, 
final short maxParts, List result, boolean allowSql, boolean allowJdo) throws TException { assert result != null; final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); - result.addAll(new GetListHelper(dbName, tblName, allowSql, allowJdo) { + result.addAll(new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { // If we have some sort of expression tree, try SQL filter pushdown. @@ -2997,7 +3195,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin List partNames = new LinkedList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); + return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames); } @Override @@ -3013,7 +3211,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin List partNames = new ArrayList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - result = getPartitionsViaOrmFilter(dbName, tblName, partNames); + result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames); } return result; } @@ -3034,7 +3232,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin */ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result) throws MetaException { - result.addAll(getPartitionNamesNoTxn( + result.addAll(getPartitionNamesNoTxn(table.getCatName(), table.getDbName(), table.getTableName(), maxParts)); if (defaultPartName == null || defaultPartName.isEmpty()) { defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); @@ -3056,7 +3254,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, short maxParts, boolean isValidatedFilter) throws MetaException { Map params = new HashMap<>(); String jdoFilter = - makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); + makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; return null; @@ -3083,7 +3281,8 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter) throws MetaException { Map params = new HashMap<>(); - String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); + String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, + params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; return null; @@ -3108,29 +3307,29 @@ private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, b * @param partNames Partition names to get the objects for. * @return Resulting partitions. 
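* Note: the lookup is now catalog-scoped; the JDOQL built by
* getPartQueryWithParams also binds table.database.catalogName.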
*/ - private List getPartitionsViaOrmFilter( + private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, List partNames) throws MetaException { if (partNames.isEmpty()) { return new ArrayList<>(); } ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setResultClass(MPartition.class); query.setClass(MPartition.class); query.setOrdering("partitionName ascending"); @SuppressWarnings("unchecked") List mparts = (List)query.executeWithMap(queryWithParams.getSecond()); - List partitions = convertToParts(dbName, tblName, mparts); + List partitions = convertToParts(catName, dbName, tblName, mparts); if (query != null) { query.closeAll(); } return partitions; } - private void dropPartitionsNoTxn(String dbName, String tblName, List partNames) { + private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List partNames) { ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setClass(MPartition.class); long deleted = query.deletePersistentAll(queryWithParams.getSecond()); @@ -3145,9 +3344,9 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par * they are referenced by other SDs. */ private HashSet detachCdsFromSdsNoTxn( - String dbName, String tblName, List partNames) { + String catName, String dbName, String tblName, List partNames) { ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setClass(MPartition.class); query.setResult("sd"); @@ -3167,9 +3366,10 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par return candidateCds; } - private ObjectPair> getPartQueryWithParams(String dbName, - String tblName, List partNames) { - StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 && ("); + private ObjectPair> getPartQueryWithParams( + String catName, String dbName, String tblName, List partNames) { + StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 &&" + + " table.database.catalogName == t3 && ("); int n = 0; Map params = new HashMap<>(); for (Iterator itr = partNames.iterator(); itr.hasNext();) { @@ -3187,14 +3387,15 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par LOG.debug(" JDOQL filter is {}", sb); params.put("t1", normalizeIdentifier(tblName)); params.put("t2", normalizeIdentifier(dbName)); + params.put("t3", normalizeIdentifier(catName)); query.declareParameters(makeParameterDeclarationString(params)); return new ObjectPair<>(query, params); } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, true, true); + return getPartitionsByFilterInternal(catName, dbName, tblName, filter, maxParts, true, true); } /** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. 
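* The helper is now constructed with a catalog name ahead of the db/table pair,
* and both the direct-SQL and JDO result paths scope their queries with it.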
*/ @@ -3204,14 +3405,15 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par private boolean doUseDirectSql; private long start; private Table table; - protected final String dbName, tblName; + protected final String catName, dbName, tblName; private boolean success = false; protected T results = null; - public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJdo) - throws MetaException { + public GetHelper(String catalogName, String dbName, String tblName, + boolean allowSql, boolean allowJdo) throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; + this.catName = normalizeIdentifier(catalogName); this.dbName = normalizeIdentifier(dbName); if (tblName != null){ this.tblName = normalizeIdentifier(tblName); @@ -3282,7 +3484,7 @@ private void start(boolean initTable) throws MetaException, NoSuchObjectExceptio start = doTrace ? System.nanoTime() : 0; openTransaction(); if (initTable && (tblName != null)) { - table = ensureGetTable(dbName, tblName); + table = ensureGetTable(catName, dbName, tblName); } doUseDirectSql = doUseDirectSql && canUseDirectSql(this); } @@ -3322,7 +3524,7 @@ private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObje start = doTrace ? System.nanoTime() : 0; openTransaction(); if (table != null) { - table = ensureGetTable(dbName, tblName); + table = ensureGetTable(catName, dbName, tblName); } } else { start = doTrace ? System.nanoTime() : 0; @@ -3391,9 +3593,9 @@ public Table getTable() { } private abstract class GetListHelper extends GetHelper> { - public GetListHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); + public GetListHelper(String catName, String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(catName, dbName, tblName, allowSql, allowJdo); } @Override @@ -3411,9 +3613,9 @@ protected String describeResult() { * @param allowJdo Whether or not we allow ORM to perform this query. * @throws MetaException */ - public GetDbHelper( - String dbName,boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName,null,allowSql,allowJdo); + public GetDbHelper(String catalogName, String dbName,boolean allowSql, boolean allowJdo) + throws MetaException { + super(catalogName, dbName,null,allowSql,allowJdo); } @Override @@ -3423,9 +3625,9 @@ protected String describeResult() { } private abstract class GetStatHelper extends GetHelper { - public GetStatHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); + public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(catalogName, dbName, tblName, allowSql, allowJdo); } @Override @@ -3435,12 +3637,12 @@ protected String describeResult() { } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = org.apache.commons.lang.StringUtils.isNotEmpty(filter) ? 
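GetHelper is the template-method wrapper behind most of these reads: open a transaction, optionally resolve the table, try the direct-SQL path, fall back to JDO on failure, commit, record timing. The hunks above only add a normalized catName field next to dbName/tblName. The control flow, sketched standalone with the Hive specifics (transactions, perf logging, DataNucleus) stubbed out:

import java.util.List;

public abstract class GetHelperSketch<T> {
  protected final String catName, dbName, tblName;
  private final boolean allowSql, allowJdo;

  protected GetHelperSketch(String catName, String dbName, String tblName,
                            boolean allowSql, boolean allowJdo) {
    assert allowSql || allowJdo;
    this.catName = catName.toLowerCase();  // stand-in for normalizeIdentifier
    this.dbName = dbName.toLowerCase();
    this.tblName = tblName == null ? null : tblName.toLowerCase();
    this.allowSql = allowSql;
    this.allowJdo = allowJdo;
  }

  protected abstract T getSqlResult() throws Exception;  // direct-SQL fast path
  protected abstract T getJdoResult() throws Exception;  // portable JDOQL path

  public T run() throws Exception {
    if (allowSql) {
      try {
        return getSqlResult();
      } catch (Exception e) {
        if (!allowJdo) throw e;  // the real code also rolls back and reopens here
      }
    }
    return getJdoResult();
  }

  public static void main(String[] args) throws Exception {
    List<String> parts = new GetHelperSketch<List<String>>("hive", "web", "clicks", true, true) {
      @Override protected List<String> getSqlResult() { throw new RuntimeException("no direct SQL"); }
      @Override protected List<String> getJdoResult() { return List.of("ds=2018-03-26"); }
    }.run();
    System.out.println(parts);  // [ds=2018-03-26]
  }
}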
PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return new GetHelper(dbName, tblName, true, true) { + return new GetHelper(catName, dbName, tblName, true, true) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3466,13 +3668,13 @@ protected Integer getJdoResult( } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, + byte[] expr) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); final byte[] tempExpr = expr; // Need to be final to pass it to an inner class - return new GetHelper(dbName, tblName, true, true) { + return new GetHelper(catName, dbName, tblName, true, true) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3514,12 +3716,13 @@ protected Integer getJdoResult( }.run(true); } - protected List getPartitionsByFilterInternal(String dbName, String tblName, - String filter, final short maxParts, boolean allowSql, boolean allowJdo) + protected List getPartitionsByFilterInternal( + String catName, String dbName, String tblName, String filter, final short maxParts, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final ExpressionTree tree = (filter != null && !filter.isEmpty()) ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3546,19 +3749,19 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc * @param tblName Table name. * @return Table object. */ - private MTable ensureGetMTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - MTable mtable = getMTable(dbName, tblName); + private MTable ensureGetMTable(String catName, String dbName, String tblName) + throws NoSuchObjectException, MetaException { + MTable mtable = getMTable(catName, dbName, tblName); if (mtable == null) { - throw new NoSuchObjectException("Specified database/table does not exist : " - + dbName + "." + tblName); + throw new NoSuchObjectException("Specified catalog.database.table does not exist : " + + getCatalogQualifiedTableName(catName, dbName, tblName)); } return mtable; } - private Table ensureGetTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - return convertToTable(ensureGetMTable(dbName, tblName)); + private Table ensureGetTable(String catName, String dbName, String tblName) + throws NoSuchObjectException, MetaException { + return convertToTable(ensureGetMTable(catName, dbName, tblName)); } /** @@ -3571,11 +3774,11 @@ private Table ensureGetTable( * @param params Parameters for the filter. Some parameters may be added here. * @return Resulting filter. */ - private String makeQueryFilterString(String dbName, MTable mtable, String filter, + private String makeQueryFilterString(String catName, String dbName, MTable mtable, String filter, Map params) throws MetaException { ExpressionTree tree = (filter != null && !filter.isEmpty()) ? 
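ensureGetMTable's NoSuchObjectException now prints the catalog-qualified name rather than db.table. The formatting assumed by that message, sketched (the real helper is Warehouse.getCatalogQualifiedTableName; the plain dot-join below is illustrative):

public class QualifiedNameSketch {
  // Illustrative stand-in for getCatalogQualifiedTableName(catName, dbName, tblName).
  static String qualify(String catName, String dbName, String tblName) {
    return catName + "." + dbName + "." + tblName;
  }

  public static void main(String[] args) {
    System.out.println("Specified catalog.database.table does not exist : "
        + qualify("hive", "web", "clicks"));  // ... hive.web.clicks
  }
}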
PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true); + return makeQueryFilterString(catName, dbName, convertToTable(mtable), tree, params, true); } /** @@ -3589,17 +3792,20 @@ private String makeQueryFilterString(String dbName, MTable mtable, String filter * by the client; if it was and we fail to create a filter, we will throw. * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error. */ - private String makeQueryFilterString(String dbName, Table table, ExpressionTree tree, - Map params, boolean isValidatedFilter) throws MetaException { + private String makeQueryFilterString(String catName, String dbName, Table table, + ExpressionTree tree, Map params, + boolean isValidatedFilter) throws MetaException { assert tree != null; FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); if (table != null) { - queryBuilder.append("table.tableName == t1 && table.database.name == t2"); + queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); params.put("t1", table.getTableName()); params.put("t2", table.getDbName()); + params.put("t3", table.getCatName()); } else { - queryBuilder.append("database.name == dbName"); + queryBuilder.append("database.name == dbName && database.catalogName == catName"); params.put("dbName", dbName); + params.put("catName", catName); } tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder); @@ -3617,7 +3823,8 @@ private String makeParameterDeclarationString(Map params) { //Create the parameter declaration string StringBuilder paramDecl = new StringBuilder(); for (String key : params.keySet()) { - paramDecl.append(", java.lang.String " + key); + paramDecl.append(", java.lang.String ") + .append(key); } return paramDecl.toString(); } @@ -3635,17 +3842,18 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException { + public List listTableNamesByFilter(String catName, String dbName, String filter, + short maxTables) throws MetaException { boolean success = false; Query query = null; List tableNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listTableNamesByFilter"); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); Map params = new HashMap<>(); - String queryFilterString = makeQueryFilterString(dbName, null, filter, params); + String queryFilterString = makeQueryFilterString(catName, dbName, null, filter, params); query = pm.newQuery(MTable.class); query.declareImports("import java.lang.String"); query.setResult("tableName"); @@ -3676,50 +3884,7 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public List listPartitionNamesByFilter(String dbName, String tableName, String filter, - short maxParts) throws MetaException { - boolean success = false; - Query query = null; - List partNames = new ArrayList<>(); - try { - openTransaction(); - LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - MTable mtable = getMTable(dbName, tableName); - if (mtable == null) { - // To be consistent with the behavior of listPartitionNames, if the - // table or db does not exist, we return an empty list - return partNames; - } - Map params = new HashMap<>(); - String queryFilterString = 
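makeQueryFilterString gains a catalog predicate on both branches: with a table in hand it constrains table.database.catalogName (bound as t3), and for database-level listings such as listTableNamesByFilter it constrains database.catalogName directly. The two resulting filter prefixes and their parameter maps, sketched:

import java.util.LinkedHashMap;
import java.util.Map;

public class QueryFilterSketch {
  static String tableFilter(Map<String, String> params, String cat, String db, String tbl) {
    params.put("t1", tbl);
    params.put("t2", db);
    params.put("t3", cat);  // the new catalog binding
    return "table.tableName == t1 && table.database.name == t2"
        + " && table.database.catalogName == t3";
  }

  static String databaseFilter(Map<String, String> params, String cat, String db) {
    params.put("dbName", db);
    params.put("catName", cat);  // the new catalog binding
    return "database.name == dbName && database.catalogName == catName";
  }

  public static void main(String[] args) {
    Map<String, String> p = new LinkedHashMap<>();
    System.out.println(tableFilter(p, "hive", "web", "clicks") + "  " + p);
    p.clear();
    System.out.println(databaseFilter(p, "hive", "web") + "  " + p);
  }
}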
makeQueryFilterString(dbName, mtable, filter, params); - query = - pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where " + queryFilterString); - if (maxParts >= 0) { - // User specified a row limit, set it on the Query - query.setRange(0, maxParts); - } - LOG.debug("Filter specified is {}, JDOQL filter is {}", filter, queryFilterString); - LOG.debug("Parms is {}", params); - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - query.setResult("partitionName"); - Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList<>(names); - LOG.debug("Done executing query for listMPartitionNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); - } finally { - rollbackAndCleanup(success, query); - } - return partNames; - } - - @Override - public void alterTable(String dbname, String name, Table newTable) + public void alterTable(String catName, String dbname, String name, Table newTable) throws InvalidObjectException, MetaException { boolean success = false; boolean registerCreationSignature = false; @@ -3727,12 +3892,13 @@ public void alterTable(String dbname, String name, Table newTable) openTransaction(); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); MTable newt = convertToMTable(newTable); if (newt == null) { throw new InvalidObjectException("new table is invalid"); } - MTable oldt = getMTable(dbname, name); + MTable oldt = getMTable(catName, dbname, name); if (oldt == null) { throw new MetaException("table " + dbname + "." 
+ name + " doesn't exist"); } @@ -3769,16 +3935,17 @@ public void alterTable(String dbname, String name, Table newTable) } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { boolean success = false; try { openTransaction(); + catName = normalizeIdentifier(catName); dbname = normalizeIdentifier(dbname); tablename = normalizeIdentifier(tablename); // Update creation metadata MCreationMetadata newMcm = convertToMCreationMetadata(cm); - MCreationMetadata mcm = getCreationMetadata(dbname, tablename); + MCreationMetadata mcm = getCreationMetadata(catName, dbname, tablename); mcm.setTables(newMcm.getTables()); mcm.setTxnList(newMcm.getTxnList()); // commit the changes @@ -3804,11 +3971,13 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta * @throws InvalidObjectException * @throws MetaException */ - private MColumnDescriptor alterPartitionNoTxn(String dbname, String name, List part_vals, - Partition newPart) throws InvalidObjectException, MetaException { + private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, String name, + List part_vals, Partition newPart) + throws InvalidObjectException, MetaException { + catName = normalizeIdentifier(catName); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); - MPartition oldp = getMPartition(dbname, name, part_vals); + MPartition oldp = getMPartition(catName, dbname, name, part_vals); MPartition newp = convertToMPart(newPart, false); MColumnDescriptor oldCD = null; MStorageDescriptor oldSD = oldp.getSd(); @@ -3834,13 +4003,13 @@ private MColumnDescriptor alterPartitionNoTxn(String dbname, String name, List part_vals, Partition newPart) - throws InvalidObjectException, MetaException { + public void alterPartition(String catName, String dbname, String name, List part_vals, + Partition newPart) throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { openTransaction(); - MColumnDescriptor oldCd = alterPartitionNoTxn(dbname, name, part_vals, newPart); + MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart); removeUnusedColumnDescriptor(oldCd); // commit the changes success = commitTransaction(); @@ -3860,8 +4029,9 @@ public void alterPartition(String dbname, String name, List part_vals, P } @Override - public void alterPartitions(String dbname, String name, List> part_vals, - List newParts) throws InvalidObjectException, MetaException { + public void alterPartitions(String catName, String dbname, String name, + List> part_vals, List newParts) + throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { @@ -3870,7 +4040,7 @@ public void alterPartitions(String dbname, String name, List> part_ Set oldCds = new HashSet<>(); for (Partition tmpPart: newParts) { List tmpPartVals = part_val_itr.next(); - MColumnDescriptor oldCd = alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart); + MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart); if (oldCd != null) { oldCds.add(oldCd); } @@ -4125,16 +4295,27 @@ private String getGuidFromDB() throws MetaException { if (CollectionUtils.isNotEmpty(foreignKeys)) { List mpkfks = new ArrayList<>(); String currentConstraintName = null; + String catName = null; // We start iterating through the foreign keys. 
This list might contain more than a single // foreign key, and each foreign key might contain multiple columns. The outer loop retrieves // the information that is common for a single key (table information) while the inner loop // checks / adds information about each column. for (int i = 0; i < foreignKeys.size(); i++) { + if (catName == null) { + catName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? foreignKeys.get(i).getCatName() : + getDefaultCatalog(conf)); + } else { + String tmpCatName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? + foreignKeys.get(i).getCatName() : getDefaultCatalog(conf)); + if (!catName.equals(tmpCatName)) { + throw new InvalidObjectException("Foreign keys cannot span catalogs"); + } + } final String fkTableDB = normalizeIdentifier(foreignKeys.get(i).getFktable_db()); final String fkTableName = normalizeIdentifier(foreignKeys.get(i).getFktable_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - final AttachedMTableInfo nChildTable = getMTable(fkTableDB, fkTableName, retrieveCD); + final AttachedMTableInfo nChildTable = getMTable(catName, fkTableDB, fkTableName, retrieveCD); final MTable childTable = nChildTable.mtbl; if (childTable == null) { throw new InvalidObjectException("Child table not found: " + fkTableName); @@ -4166,7 +4347,7 @@ private String getGuidFromDB() throws MetaException { existingTablePrimaryKeys = primaryKeys; existingTableUniqueConstraints = uniqueConstraints; } else { - nParentTable = getMTable(pkTableDB, pkTableName, true); + nParentTable = getMTable(catName, pkTableDB, pkTableName, true); parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + pkTableName); @@ -4177,8 +4358,8 @@ private String getGuidFromDB() throws MetaException { if (parentTable.getPartitionKeys() != null) { parentCols.addAll(parentTable.getPartitionKeys()); } - existingTablePrimaryKeys = getPrimaryKeys(pkTableDB, pkTableName); - existingTableUniqueConstraints = getUniqueConstraints(pkTableDB, pkTableName); + existingTablePrimaryKeys = getPrimaryKeys(catName, pkTableDB, pkTableName); + existingTableUniqueConstraints = getUniqueConstraints(catName, pkTableDB, pkTableName); } // Here we build an aux structure that is used to verify that the foreign key that is declared @@ -4353,13 +4534,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < pks.size(); i++) { + final String catName = normalizeIdentifier(pks.get(i).getCatName()); final String tableDB = normalizeIdentifier(pks.get(i).getTable_db()); final String tableName = normalizeIdentifier(pks.get(i).getTable_name()); final String columnName = normalizeIdentifier(pks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
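The foreign-key loop resolves an effective catalog once, defaulting when the key carries none, and then requires every later key to agree, since a single constraint cannot reference tables in two catalogs. That resolve-or-verify rule, sketched standalone (defaultCatalog stands in for getDefaultCatalog(conf)):

import java.util.List;
import java.util.Optional;

public class FkCatalogSketch {
  record ForeignKey(Optional<String> catName, String fkTable, String pkTable) {}

  static String defaultCatalog() { return "hive"; }  // stand-in for getDefaultCatalog(conf)

  // Returns the one catalog all keys live in, or throws if they disagree.
  static String resolveCatalog(List<ForeignKey> fks) {
    String catName = null;
    for (ForeignKey fk : fks) {
      String c = fk.catName().orElse(defaultCatalog()).toLowerCase();
      if (catName == null) {
        catName = c;
      } else if (!catName.equals(c)) {
        throw new IllegalArgumentException("Foreign keys cannot span catalogs");
      }
    }
    return catName;
  }

  public static void main(String[] args) {
    System.out.println(resolveCatalog(List.of(
        new ForeignKey(Optional.empty(), "orders", "customers"),
        new ForeignKey(Optional.of("HIVE"), "orders", "products"))));  // hive
  }
}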
- AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4376,10 +4558,10 @@ private static String generateColNameTypeSignature(String colName, String colTyp throw new InvalidObjectException("Parent column not found: " + columnName); } } - if (getPrimaryKeyConstraintName( + if (getPrimaryKeyConstraintName(parentTable.getDatabase().getCatalogName(), parentTable.getDatabase().getName(), parentTable.getTableName()) != null) { throw new MetaException(" Primary key already exists for: " + - parentTable.getDatabase().getName() + "." + pks.get(i).getTable_name()); + getCatalogQualifiedTableName(catName, tableDB, tableName)); } if (pks.get(i).getPk_name() == null) { if (pks.get(i).getKey_seq() == 1) { @@ -4426,13 +4608,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < uks.size(); i++) { + final String catName = normalizeIdentifier(uks.get(i).getCatName()); final String tableDB = normalizeIdentifier(uks.get(i).getTable_db()); final String tableName = normalizeIdentifier(uks.get(i).getTable_name()); final String columnName = normalizeIdentifier(uks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4500,29 +4683,30 @@ private static String generateColNameTypeSignature(String colName, String colTyp return addCheckConstraints(nns, true); } - private List addCheckConstraints(List nns, boolean retrieveCD) + private List addCheckConstraints(List cc, boolean retrieveCD) throws InvalidObjectException, MetaException { List nnNames = new ArrayList<>(); List cstrs = new ArrayList<>(); String constraintName = null; - for (int i = 0; i < nns.size(); i++) { - final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); - final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); - final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); - final String ccName = nns.get(i).getDc_name(); - boolean isEnable = nns.get(i).isEnable_cstr(); - boolean isValidate = nns.get(i).isValidate_cstr(); - boolean isRely = nns.get(i).isRely_cstr(); - String constraintValue = nns.get(i).getCheck_expression(); - addConstraint(tableDB, tableName, columnName, ccName, isEnable, isRely, isValidate, + for (int i = 0; i < cc.size(); i++) { + final String catName = normalizeIdentifier(cc.get(i).getCatName()); + final String tableDB = normalizeIdentifier(cc.get(i).getTable_db()); + final String tableName = normalizeIdentifier(cc.get(i).getTable_name()); + final String columnName = normalizeIdentifier(cc.get(i).getColumn_name()); + final String ccName = cc.get(i).getDc_name(); + boolean isEnable = cc.get(i).isEnable_cstr(); + boolean isValidate = cc.get(i).isValidate_cstr(); + boolean isRely = cc.get(i).isRely_cstr(); + String constraintValue = cc.get(i).getCheck_expression(); + addConstraint(catName, tableDB, tableName, 
columnName, ccName, isEnable, isRely, isValidate, MConstraint.CHECK_CONSTRAINT, constraintValue, retrieveCD, nnNames, cstrs); } pm.makePersistentAll(cstrs); return nnNames; } - private void addConstraint(String tableDB, String tableName, String columnName, String ccName, + private void addConstraint(String catName, String tableDB, String tableName, String columnName, String ccName, boolean isEnable, boolean isRely, boolean isValidate, int constraintType, String constraintValue, boolean retrieveCD, List nnNames, List cstrs) @@ -4530,7 +4714,7 @@ private void addConstraint(String tableDB, String tableName, String columnName, String constraintName = null; // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4583,6 +4767,7 @@ private void addConstraint(String tableDB, String tableName, String columnName, String constraintName = null; for (int i = 0; i < nns.size(); i++) { + final String catName = normalizeIdentifier(nns.get(i).getCatName()); final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); @@ -4591,7 +4776,7 @@ private void addConstraint(String tableDB, String tableName, String columnName, boolean isValidate = nns.get(i).isValidate_cstr(); boolean isRely = nns.get(i).isRely_cstr(); String constraintValue = nns.get(i).getDefault_value(); - addConstraint(tableDB, tableName, columnName, ccName, isEnable, isRely, isValidate, + addConstraint(catName, tableDB, tableName, columnName, ccName, isEnable, isRely, isValidate, MConstraint.DEFAULT_CONSTRAINT, constraintValue, retrieveCD, nnNames, cstrs); } pm.makePersistentAll(cstrs); @@ -4605,13 +4790,14 @@ private void addConstraint(String tableDB, String tableName, String columnName, String constraintName = null; for (int i = 0; i < nns.size(); i++) { + final String catName = normalizeIdentifier(nns.get(i).getCatName()); final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
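Unlike the foreign-key path, the primary-key, unique, check, default and not-null loops take getCatName() as-is and normalize it per row, so callers are expected to have filled the catalog in beforehand. The per-row preparation that precedes each getMTable(catName, tableDB, tableName) lookup, sketched under that assumption with simplified types:

import java.util.List;
import java.util.Locale;

public class ConstraintRowSketch {
  record ConstraintRow(String catName, String tableDb, String tableName, String columnName) {}

  static String normalize(String id) { return id.toLowerCase(Locale.ROOT); }  // stand-in

  public static void main(String[] args) {
    List<ConstraintRow> rows = List.of(new ConstraintRow("HIVE", "Web", "Clicks", "UserId"));
    for (ConstraintRow r : rows) {
      // Mirrors the per-constraint loops: every identifier, including the
      // catalog, is normalized before the parent-table lookup.
      String catName = normalize(r.catName());
      String tableDb = normalize(r.tableDb());
      String tableName = normalize(r.tableName());
      String columnName = normalize(r.columnName());
      System.out.printf("lookup %s.%s.%s, column %s%n", catName, tableDb, tableName, columnName);
    }
  }
}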
- AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -5093,14 +5279,15 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, return ret; } - public List getDBPrivilege(String dbName, + private List getDBPrivilege(String catName, String dbName, String principalName, PrincipalType principalType) throws InvalidObjectException, MetaException { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameDbPriv = this.listPrincipalMDBGrants( - principalName, principalType, dbName); + principalName, principalType, catName, dbName); if (CollectionUtils.isNotEmpty(userNameDbPriv)) { List grantInfos = new ArrayList<>( userNameDbPriv.size()); @@ -5118,10 +5305,11 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); @@ -5129,14 +5317,14 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, openTransaction(); if (userName != null) { Map> dbUserPriv = new HashMap<>(); - dbUserPriv.put(userName, getDBPrivilege(dbName, userName, + dbUserPriv.put(userName, getDBPrivilege(catName, dbName, userName, PrincipalType.USER)); ret.setUserPrivileges(dbUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> dbGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName, + dbGroupPriv.put(groupName, getDBPrivilege(catName, dbName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(dbGroupPriv); @@ -5146,7 +5334,7 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, Map> dbRolePriv = new HashMap<>(); for (String roleName : roleNames) { dbRolePriv - .put(roleName, getDBPrivilege(dbName, roleName, PrincipalType.ROLE)); + .put(roleName, getDBPrivilege(catName, dbName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(dbRolePriv); } @@ -5160,26 +5348,27 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { openTransaction(); if (userName != null) { Map> partUserPriv = new HashMap<>(); - partUserPriv.put(userName, getPartitionPrivilege(dbName, + partUserPriv.put(userName, getPartitionPrivilege(catName, dbName, tableName, partition, userName, PrincipalType.USER)); ret.setUserPrivileges(partUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> partGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - partGroupPriv.put(groupName, 
getPartitionPrivilege(dbName, tableName, + partGroupPriv.put(groupName, getPartitionPrivilege(catName, dbName, tableName, partition, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(partGroupPriv); @@ -5188,7 +5377,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> partRolePriv = new HashMap<>(); for (String roleName : roleNames) { - partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, + partRolePriv.put(roleName, getPartitionPrivilege(catName, dbName, tableName, partition, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(partRolePriv); @@ -5203,26 +5392,27 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); tableName = normalizeIdentifier(tableName); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { Map> tableUserPriv = new HashMap<>(); - tableUserPriv.put(userName, getTablePrivilege(dbName, + tableUserPriv.put(userName, getTablePrivilege(catName, dbName, tableName, userName, PrincipalType.USER)); ret.setUserPrivileges(tableUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> tableGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, + tableGroupPriv.put(groupName, getTablePrivilege(catName, dbName, tableName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(tableGroupPriv); @@ -5231,7 +5421,7 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> tableRolePriv = new HashMap<>(); for (String roleName : roleNames) { - tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, + tableRolePriv.put(roleName, getTablePrivilege(catName, dbName, tableName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(tableRolePriv); @@ -5246,13 +5436,14 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); @@ -5260,14 +5451,14 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, openTransaction(); if (userName != null) { Map> columnUserPriv = new HashMap<>(); - columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, + columnUserPriv.put(userName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, userName, PrincipalType.USER)); ret.setUserPrivileges(columnUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> columnGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, + 
columnGroupPriv.put(groupName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(columnGroupPriv); @@ -5276,7 +5467,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> columnRolePriv = new HashMap<>(); for (String roleName : roleNames) { - columnRolePriv.put(roleName, getColumnPrivilege(dbName, tableName, + columnRolePriv.put(roleName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(columnRolePriv); @@ -5290,17 +5481,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, return ret; } - private List getPartitionPrivilege(String dbName, + private List getPartitionPrivilege(String catName, String dbName, String tableName, String partName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); if (principalName != null) { List userNameTabPartPriv = this .listPrincipalMPartitionGrants(principalName, principalType, - dbName, tableName, partName); + catName, dbName, tableName, partName); if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); @@ -5321,15 +5513,16 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return str == null ? null : PrincipalType.valueOf(str); } - private List getTablePrivilege(String dbName, + private List getTablePrivilege(String catName, String dbName, String tableName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); if (principalName != null) { List userNameTabPartPriv = this .listAllMTableGrants(principalName, principalType, - dbName, tableName); + catName, dbName, tableName); if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); @@ -5345,18 +5538,19 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return new ArrayList<>(0); } - private List getColumnPrivilege(String dbName, + private List getColumnPrivilege(String catName, String dbName, String tableName, String columnName, String partitionName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); if (partitionName == null) { List userNameColumnPriv = this .listPrincipalMTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { List grantInfos = new ArrayList<>( userNameColumnPriv.size()); @@ -5371,7 +5565,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { } else { List userNameColumnPriv = this .listPrincipalMPartitionColumnGrants(principalName, - principalType, dbName, tableName, partitionName, columnName); + principalType, catName, dbName, tableName, partitionName, columnName); if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { List grantInfos = new ArrayList<>( userNameColumnPriv.size()); @@ -5417,6 +5611,8 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce validateRole(userName); } + String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : + getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { List globalPrivs = this .listPrincipalMGlobalGrants(userName, principalType); @@ -5437,10 +5633,10 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce persistentObjs.add(mGlobalPrivs); } } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); if (dbObj != null) { List dbPrivs = this.listPrincipalMDBGrants( - userName, principalType, hiveObject.getDbName()); + userName, principalType, catName, hiveObject.getDbName()); if (dbPrivs != null) { for (MDBPrivilege priv : dbPrivs) { if (priv.getGrantor().equalsIgnoreCase(grantor)) { @@ -5460,12 +5656,12 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); if (tblObj != null) { List tablePrivs = this .listAllMTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); if (tablePrivs != null) { for (MTablePrivilege priv : tablePrivs) { if (priv.getGrantor() != null @@ -5488,14 +5684,14 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - MPartition partObj = this.getMPartition(hiveObject.getDbName(), + MPartition partObj = this.getMPartition(catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getPartValues()); String partName = null; if (partObj != null) { partName = partObj.getPartitionName(); List partPrivs = this .listPrincipalMPartitionGrants(userName, - principalType, hiveObject.getDbName(), hiveObject + principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName()); if (partPrivs != null) { for (MPartitionPrivilege priv : partPrivs) { @@ -5519,19 +5715,19 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); if (tblObj != null) { if (hiveObject.getPartValues() != null) { MPartition partObj = null; List colPrivs = null; - partObj = this.getMPartition(hiveObject.getDbName(), hiveObject + partObj = this.getMPartition(catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getPartValues()); if (partObj == null) { continue; } colPrivs = this.listPrincipalMPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName(), hiveObject.getColumnName()); @@ -5561,7 +5757,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } else { List colPrivs = null; colPrivs = this.listPrincipalMTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); if (colPrivs != null) { @@ -5627,6 +5823,8 @@ public boolean 
revokePrivileges(PrivilegeBag privileges, boolean grantOption) String userName = privDef.getPrincipalName(); PrincipalType principalType = privDef.getPrincipalType(); + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { List mSecUser = this.listPrincipalMGlobalGrants( userName, principalType); @@ -5657,12 +5855,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); if (dbObj != null) { String db = hiveObject.getDbName(); boolean found = false; List dbGrants = this.listPrincipalMDBGrants( - userName, principalType, db); + userName, principalType, catName, db); for (String privilege : privs) { for (MDBPrivilege dbGrant : dbGrants) { String dbGrantPriv = dbGrant.getPrivilege(); @@ -5691,7 +5889,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) boolean found = false; List tableGrants = this .listAllMTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); for (String privilege : privs) { for (MTablePrivilege tabGrant : tableGrants) { String tableGrantPriv = tabGrant.getPrivilege(); @@ -5718,14 +5916,14 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { boolean found = false; - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject.getObjectName()); + Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName()); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); } List partitionGrants = this .listPrincipalMPartitionGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), partName); + catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName); for (String privilege : privs) { for (MPartitionPrivilege partGrant : partitionGrants) { String partPriv = partGrant.getPrivilege(); @@ -5751,7 +5949,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject + Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); String partName = null; if (hiveObject.getPartValues() != null) { @@ -5761,7 +5959,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) if (partName != null) { List mSecCol = listPrincipalMPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partName, hiveObject.getColumnName()); boolean found = false; if (mSecCol != null) { @@ -5793,7 +5991,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else { List mSecCol = listPrincipalMTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); boolean found = false; if (mSecCol != null) { @@ -5897,7 +6095,7 @@ 
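grantPrivileges and revokePrivileges resolve the catalog identically: take it from the HiveObjectRef when set, otherwise fall back to the configured default, then thread it through every getMDatabase/getMTable/getMPartition lookup and grant listing that follows. The shared rule, sketched with simplified types:

import java.util.Optional;

public class PrivilegeCatalogSketch {
  record ObjectRefLite(String objectType, String dbName, String objectName,
                       Optional<String> catName) {}

  static String defaultCatalog() { return "hive"; }  // stand-in for getDefaultCatalog(conf)

  // The resolution rule used by both the grant and the revoke paths.
  static String catalogOf(ObjectRefLite ref) {
    return ref.catName().orElse(defaultCatalog());
  }

  public static void main(String[] args) {
    var db = new ObjectRefLite("DATABASE", "web", null, Optional.empty());
    var tbl = new ObjectRefLite("TABLE", "web", "clicks", Optional.of("spark"));
    System.out.println(catalogOf(db));   // hive  (defaulted)
    System.out.println(catalogOf(tbl));  // spark (explicit)
  }
}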
public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPrincipalMGlobalGrants(String principalName, + private List listPrincipalMGlobalGrants(String principalName, PrincipalType principalType) { boolean commited = false; Query query = null; @@ -5977,8 +6175,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPrincipalMDBGrants(String principalName, - PrincipalType principalType, String dbName) { + private List listPrincipalMDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName) { boolean success = false; Query query = null; List mSecurityDBList = new ArrayList<>(); @@ -5989,11 +6187,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) openTransaction(); query = pm.newQuery(MDBPrivilege.class, - "principalName == t1 && principalType == t2 && database.name == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + "principalName == t1 && principalType == t2 && database.name == t3 && database.catalogName == t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); List mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - dbName); + dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6008,8 +6207,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) @Override public List listPrincipalDBGrants(String principalName, PrincipalType principalType, - String dbName) { - List mDbs = listPrincipalMDBGrants(principalName, principalType, dbName); + String catName, String dbName) { + List mDbs = listPrincipalMDBGrants(principalName, principalType, catName, dbName); if (mDbs.isEmpty()) { return Collections.emptyList(); } @@ -6018,6 +6217,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) MDBPrivilege sDB = mDbs.get(i); HiveObjectRef objectRef = new HiveObjectRef( HiveObjectType.DATABASE, dbName, null, null, null); + objectRef.setCatName(catName); HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, sDB.getPrincipalName(), principalType, new PrivilegeGrantInfo(sDB.getPrivilege(), sDB @@ -6040,10 +6240,10 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @Override - public List listDBGrantsAll(String dbName) { + public List listDBGrantsAll(String catName, String dbName) { QueryWrapper queryWrapper = new QueryWrapper(); try { - return convertDB(listDatabaseGrants(dbName, queryWrapper)); + return convertDB(listDatabaseGrants(catName, dbName, queryWrapper)); } finally { queryWrapper.close(); } @@ -6058,6 +6258,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATABASE, database, null, null, null); + objectRef.setCatName(priv.getDatabase().getCatalogName()); PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); @@ -6099,22 +6300,23 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listAllTableGrants(String dbName, String tableName) { + private List listAllTableGrants(String catName, String dbName, String tableName) { boolean success = 
false; Query query = null; - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); List mSecurityTabList = new ArrayList<>(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { LOG.debug("Executing listAllTableGrants"); openTransaction(); - String queryStr = "table.tableName == t1 && table.database.name == t2"; + String queryStr = "table.tableName == t1 && table.database.name == t2" + + " && table.database.catalogName == t3"; query = pm.newQuery(MTablePrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - List mPrivs = (List) query.executeWithArray(tableName, dbName); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + List mPrivs = + (List) query.executeWithArray(tableName, dbName, catName); LOG.debug("Done executing query for listAllTableGrants"); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6129,9 +6331,10 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllPartitionGrants(String dbName, String tableName) { + private List listTableAllPartitionGrants(String catName, String dbName, String tableName) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; Query query = null; List mSecurityTabPartList = new ArrayList<>(); @@ -6139,10 +6342,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Executing listTableAllPartitionGrants"); openTransaction(); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " + + "&& partition.table.database.catalogName == t3"; query = pm.newQuery(MPartitionPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - List mPrivs = (List) query.executeWithArray(tableName, dbName); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + List mPrivs = + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6156,21 +6361,24 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllColumnGrants(String dbName, String tableName) { + private List listTableAllColumnGrants( + String catName, String dbName, String tableName) { boolean success = false; Query query = null; List mTblColPrivilegeList = new ArrayList<>(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { LOG.debug("Executing listTableAllColumnGrants"); openTransaction(); - String queryStr = "table.tableName == t1 && table.database.name == t2"; + String queryStr = "table.tableName == t1 && table.database.name == t2 && " + + "table.database.catalogName == t3"; query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); List mPrivs = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); 
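A mechanical detail repeated across these grant-listing hunks: JDO binds executeWithArray arguments positionally against the declared parameter list, so when a filter gains " && table.database.catalogName == t3", the declaration gains a third java.lang.String and catName must be appended in the matching position. Just that bookkeeping, with no JDO runtime involved:

import java.util.Arrays;

public class JdoBindingSketch {
  public static void main(String[] args) {
    // Filter, declaration and argument array must stay in lockstep; the
    // catalog predicate adds one entry to each, always at the same position.
    String filter = "table.tableName == t1 && table.database.name == t2"
        + " && table.database.catalogName == t3";
    String declared = "java.lang.String t1, java.lang.String t2, java.lang.String t3";
    Object[] arguments = { "clicks", "web", "hive" };  // t1, t2, t3 in order

    System.out.println(filter);
    System.out.println(declared);
    System.out.println(Arrays.toString(arguments));
  }
}

Getting the order wrong still compiles and simply matches the wrong column, which is why these hunks normalize catName right next to dbName: the two are the easiest to transpose.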
@@ -6184,22 +6392,24 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllPartitionColumnGrants(String dbName, - String tableName) { + private List listTableAllPartitionColumnGrants( + String catName, String dbName, String tableName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionColumnGrants"); openTransaction(); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " + + "&& partition.table.database.catalogName == t3"; query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); List mPrivs = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6213,19 +6423,21 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPartitionAllColumnGrants(String dbName, - String tableName, List partNames) { + private List listPartitionAllColumnGrants( + String catName, String dbName, String tableName, List partNames) { boolean success = false; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityColList = null; try { openTransaction(); LOG.debug("Executing listPartitionAllColumnGrants"); - mSecurityColList = queryByPartitionNames( + mSecurityColList = queryByPartitionNames(catName, dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); + "partition.table.tableName", "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); LOG.debug("Done executing query for listPartitionAllColumnGrants"); pm.retrieveAll(mSecurityColList); success = commitTransaction(); @@ -6238,25 +6450,29 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) return mSecurityColList; } - public void dropPartitionAllColumnGrantsNoTxn( - String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( + private void dropPartitionAllColumnGrantsNoTxn( + String catName, String dbName, String tableName, List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); + "partition.table.tableName", "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @SuppressWarnings("unchecked") - private List listDatabaseGrants(String dbName, QueryWrapper queryWrapper) { + private List listDatabaseGrants(String catName, String dbName, QueryWrapper queryWrapper) { dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; try { 
LOG.debug("Executing listDatabaseGrants"); openTransaction(); - Query query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, "database.name == t1"); - query.declareParameters("java.lang.String t1"); - List mSecurityDBList = (List) query.executeWithArray(dbName); + Query query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, + "database.name == t1 && database.catalogName == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + List mSecurityDBList = + (List) query.executeWithArray(dbName, catName); pm.retrieveAll(mSecurityDBList); success = commitTransaction(); LOG.debug("Done retrieving all objects for listDatabaseGrants"); @@ -6269,7 +6485,7 @@ public void dropPartitionAllColumnGrantsNoTxn( } @SuppressWarnings("unchecked") - private List listPartitionGrants(String dbName, String tableName, + private List listPartitionGrants(String catName, String dbName, String tableName, List partNames) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); @@ -6279,9 +6495,10 @@ public void dropPartitionAllColumnGrantsNoTxn( try { openTransaction(); LOG.debug("Executing listPartitionGrants"); - mSecurityTabPartList = queryByPartitionNames( + mSecurityTabPartList = queryByPartitionNames(catName, dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); + "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); LOG.debug("Done executing query for listPartitionGrants"); pm.retrieveAll(mSecurityTabPartList); success = commitTransaction(); @@ -6294,32 +6511,36 @@ public void dropPartitionAllColumnGrantsNoTxn( return mSecurityTabPartList; } - private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( + private void dropPartitionGrantsNoTxn(String catName, String dbName, String tableName, + List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); + "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @SuppressWarnings("unchecked") - private List queryByPartitionNames(String dbName, String tableName, - List partNames, Class clazz, String tbCol, String dbCol, String partCol) { - ObjectPair queryAndParams = makeQueryByPartitionNames( - dbName, tableName, partNames, clazz, tbCol, dbCol, partCol); + private List queryByPartitionNames(String catName, String dbName, String tableName, + List partNames, Class clazz, String tbCol, String dbCol, String partCol, + String catCol) { + ObjectPair queryAndParams = makeQueryByPartitionNames(catName, + dbName, tableName, partNames, clazz, tbCol, dbCol, partCol, catCol); return (List)queryAndParams.getFirst().executeWithArray(queryAndParams.getSecond()); } private ObjectPair makeQueryByPartitionNames( - String dbName, String tableName, List partNames, Class clazz, - String tbCol, String dbCol, 
String partCol, String catCol) { + String queryStr = tbCol + " == t1 && " + dbCol + " == t2 && " + catCol + " == t3"; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + Object[] params = new Object[3 + partNames.size()]; params[0] = normalizeIdentifier(tableName); params[1] = normalizeIdentifier(dbName); + params[2] = normalizeIdentifier(catName); int index = 0; for (String partName : partNames) { - params[index + 2] = partName; + params[index + 3] = partName; queryStr += ((index == 0) ? " && (" : " || ") + partCol + " == p" + index; paramStr += ", java.lang.String p" + index; ++index; @@ -6331,11 +6552,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listAllMTableGrants( - String principalName, PrincipalType principalType, String dbName, + private List listAllMTableGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; Query query = null; List mSecurityTabPartList = new ArrayList<>(); @@ -6344,12 +6566,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - tableName, dbName); + tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6365,10 +6589,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listAllTableGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName) { List mTbls = - listAllMTableGrants(principalName, principalType, dbName, tableName); + listAllMTableGrants(principalName, principalType, catName, dbName, tableName); if (mTbls.isEmpty()) { return Collections.emptyList(); } @@ -6377,6 +6602,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + private List listPrincipalMPartitionGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String partName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionGrants"); @@ -6403,13 +6630,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, partName); + principalType.toString(), tableName, dbName, catName, partName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6425,12 +6653,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, List partValues, String partName) { List mParts = listPrincipalMPartitionGrants(principalName, - principalType, dbName, tableName, partName); + principalType, catName, dbName, tableName, partName); if (mParts.isEmpty()) { return Collections.emptyList(); } @@ -6439,6 +6668,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMTableColumnGrants( - String principalName, PrincipalType principalType, 
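makeQueryByPartitionNames packs its fixed parameters first and the partition names after them, so adding the catalog column shifts the partition-name block from offset 2 to offset 3, which is exactly the params[index + 2] to params[index + 3] change above. The packing, sketched standalone with the same t1/t2/t3 + p0..pN naming:

import java.util.Arrays;
import java.util.List;

public class PartitionNameQuerySketch {
  static Object[] packParams(String catName, String dbName, String tblName,
                             List<String> partNames) {
    Object[] params = new Object[3 + partNames.size()];
    params[0] = tblName;  // t1
    params[1] = dbName;   // t2
    params[2] = catName;  // t3, the new fixed slot
    int index = 0;
    for (String partName : partNames) {
      params[index + 3] = partName;  // p0, p1, ... start after the fixed slots
      index++;
    }
    return params;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(
        packParams("hive", "web", "clicks", List.of("ds=2018-03-25", "ds=2018-03-26"))));
  }
}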
String dbName, + private List listPrincipalMTableColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { boolean success = false; Query query = null; @@ -6467,13 +6697,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, columnName); + principalType.toString(), tableName, dbName, catName, columnName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6489,11 +6720,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalTableColumnGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, String columnName) { List mTableCols = - listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); + listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); if (mTableCols.isEmpty()) { return Collections.emptyList(); } @@ -6502,6 +6734,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + private List listPrincipalMPartitionColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionColumnGrants"); @@ -6530,12 +6764,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, partitionName, columnName); + principalType.toString(), tableName, dbName, catName, partitionName, columnName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6551,13 +6786,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionColumnGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, List partValues, String partitionName, String columnName) { List mPartitionCols = - listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, + listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partitionName, columnName); if (mPartitionCols.isEmpty()) { return Collections.emptyList(); @@ -6567,6 +6803,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPartitionColumnGrantsAll(String dbName, String tableName, - String partitionName, String columnName) { + public List listPartitionColumnGrantsAll( + String catName, String dbName, String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; try { @@ -6620,11 +6857,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, + (List) query.executeWithArray(tableName, dbName, catName, partitionName, columnName); LOG.debug("Done executing query for listPartitionColumnGrantsAll"); pm.retrieveAll(mSecurityTabPartList); @@ 
-6649,6 +6887,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listTableGrantsAll(String dbName, String tableName) { + public List listTableGrantsAll(String catName, String dbName, String tableName) { boolean success = false; Query query = null; dbName = normalizeIdentifier(dbName); @@ -6722,10 +6961,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); LOG.debug("Done executing query for listTableGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertTable(mSecurityTabPartList); @@ -6748,6 +6988,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPartitionGrantsAll(String dbName, String tableName, + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { boolean success = false; Query query = null; @@ -6820,10 +7061,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, partitionName); + (List) query.executeWithArray(tableName, dbName, catName, partitionName); LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertPartition(mSecurityTabPartList); @@ -6847,6 +7089,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listTableColumnGrantsAll(String dbName, String tableName, + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { boolean success = false; Query query = null; @@ -6925,10 +7168,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, columnName); + (List) query.executeWithArray(tableName, dbName, + catName, columnName); LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertTableCols(mSecurityTabPartList); @@ -6951,6 +7197,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partName, PartitionEventType evtType) throws UnknownTableException, MetaException, InvalidPartitionException, UnknownPartitionException { boolean success = false; @@ -6997,16 +7244,17 @@ public boolean isPartitionMarkedForEvent(String dbName, String tblName, openTransaction(); query = pm.newQuery(MPartitionEvent.class, - "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4"); + "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4 && catalogName == t5"); query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4"); - Table tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid. + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," + + "java.lang.String t5"); + Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid. 
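// --- Aside: an illustrative sketch, not part of this patch. Nearly every hunk in
// this change applies the same JDOQL recipe visible just above: add a
// "catalogName == tN" clause to the filter string, declare one extra
// java.lang.String parameter, and append the catalog name to the
// executeWithArray() arguments. Assuming a JDO PersistenceManager pm and the
// metastore model class MPartitionEvent, the recipe in isolation looks like:

import java.util.Collection;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import org.apache.hadoop.hive.metastore.model.MPartitionEvent;

class CatalogScopedQuerySketch {
  @SuppressWarnings("unchecked")
  static Collection<MPartitionEvent> partitionEvents(PersistenceManager pm, String catName,
      String dbName, String tblName, String partName, int evtType) {
    // Same filter as isPartitionMarkedForEvent above; t5 is the new catalog predicate.
    Query query = pm.newQuery(MPartitionEvent.class,
        "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4 && catalogName == t5");
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3,"
        + " int t4, java.lang.String t5");
    // Positional arguments line up with the declared parameters, catName last.
    return (Collection<MPartitionEvent>) query.executeWithArray(dbName, tblName, partName,
        evtType, catName);
  }
}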
if (null == tbl) { throw new UnknownTableException("Table: " + tblName + " is not found."); } Collection partEvents = (Collection) query.executeWithArray(dbName, tblName, - getPartitionStr(tbl, partName), evtType.getValue()); + getPartitionStr(tbl, partName), evtType.getValue(), catName); pm.retrieveAll(partEvents); success = commitTransaction(); @@ -7018,7 +7266,7 @@ public boolean isPartitionMarkedForEvent(String dbName, String tblName, } @Override - public Table markPartitionForEvent(String dbName, String tblName, Map partName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { LOG.debug("Begin executing markPartitionForEvent"); @@ -7026,11 +7274,11 @@ public Table markPartitionForEvent(String dbName, String tblName, Map result = null; validateTableCols(table, colNames); Query query = queryWrapper.query = pm.newQuery(MTableColumnStatistics.class); - String filter = "tableName == t1 && dbName == t2 && ("; - String paramStr = "java.lang.String t1, java.lang.String t2"; - Object[] params = new Object[colNames.size() + 2]; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + Object[] params = new Object[colNames.size() + 3]; params[0] = table.getTableName(); params[1] = table.getDbName(); + params[2] = table.getCatName(); for (int i = 0; i < colNames.size(); ++i) { filter += ((i == 0) ? "" : " || ") + "colName == c" + i; paramStr += ", java.lang.String c" + i; - params[i + 2] = colNames.get(i); + params[i + 3] = colNames.get(i); } filter += ")"; query.setFilter(filter); @@ -7818,20 +8070,20 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatisticsInternal(dbName, tableName, colNames, true, true); + return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true); } protected ColumnStatistics getTableColumnStatisticsInternal( - String dbName, String tableName, final List colNames, boolean allowSql, + String catName, String dbName, String tableName, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetStatHelper(normalizeIdentifier(dbName), + return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName), normalizeIdentifier(tableName), allowSql, allowJdo) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getTableStats(dbName, tblName, colNames, enableBitVector); + return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector); } @Override protected ColumnStatistics getJdoResult( @@ -7863,21 +8115,21 @@ protected ColumnStatistics getJdoResult( } @Override - public List getPartitionColumnStatistics(String dbName, String tableName, + public List getPartitionColumnStatistics(String catName, String dbName, String tableName, List partNames, List colNames) throws MetaException, NoSuchObjectException { return 
getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, true, true); + catName, dbName, tableName, partNames, colNames, true, true); } protected List getPartitionColumnStatisticsInternal( - String dbName, String tableName, final List partNames, final List colNames, + String catName, String dbName, String tableName, final List partNames, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetListHelper(dbName, tableName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tableName, allowSql, allowJdo) { @Override protected List getSqlResult( GetHelper ctx) throws MetaException { - return directSql.getPartitionStats(dbName, tblName, partNames, colNames, enableBitVector); + return directSql.getPartitionStats(catName, dbName, tblName, partNames, colNames, enableBitVector); } @Override protected List getJdoResult( @@ -7919,17 +8171,17 @@ protected ColumnStatistics getJdoResult( @Override - public AggrStats get_aggr_stats_for(String dbName, String tblName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetHelper(dbName, tblName, true, false) { + return new GetHelper(catName, dbName, tblName, true, false) { @Override protected AggrStats getSqlResult(GetHelper ctx) throws MetaException { - return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, + return directSql.aggrColStatsForPartitions(catName, dbName, tblName, partNames, colNames, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } @Override @@ -7948,6 +8200,35 @@ protected String describeResult() { } @Override + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + final boolean enableBitVector = + MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); + return new GetHelper(catName, dbName, null, true, false) { + @Override + protected List getSqlResult( + GetHelper ctx) throws MetaException { + return directSql.getColStatsForAllTablePartitions(catName, dbName, enableBitVector); + } + + @Override + protected List getJdoResult( + GetHelper ctx) + throws MetaException, NoSuchObjectException { + // This is a fast path for query optimization: if we can find this info + // quickly using directSql, do it. No point in falling back to the slow + // path here. 
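// --- Aside: an illustrative sketch, not part of this patch. The new
// getPartitionColStatsForDatabase above leans on ObjectStore's GetHelper
// template, constructed with allowSql=true and allowJdo=false, so only the
// direct-SQL branch can ever run and the JDO branch exists only to state that
// no slow path is implemented. Reduced to its essentials, with hypothetical
// names, the two-path template behaves roughly like this:

abstract class TwoPathFetchSketch<T> {
  private final boolean allowSql;  // attempt the hand-tuned direct-SQL path
  private final boolean allowJdo;  // permit falling back to the ORM (JDO) path

  TwoPathFetchSketch(boolean allowSql, boolean allowJdo) {
    this.allowSql = allowSql;
    this.allowJdo = allowJdo;
  }

  protected abstract T getSqlResult() throws Exception;
  protected abstract T getJdoResult() throws Exception;

  T run() throws Exception {
    if (allowSql) {
      try {
        return getSqlResult();
      } catch (Exception e) {
        if (!allowJdo) {
          throw e;  // no fallback allowed: surface the direct-SQL failure
        }
      }
    }
    if (!allowJdo) {
      throw new IllegalStateException("no fetch path enabled");
    }
    return getJdoResult();  // slower but always-available ORM path
  }
}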
+ throw new MetaException("Jdo path is not implemented for getPartitionColStatsForDatabase."); + } + + @Override + protected String describeResult() { + return null; + } + }.run(true); + } + + @Override public void flushCache() { // NOP as there's no caching } @@ -7967,12 +8248,13 @@ public void flushCache() { LOG.warn("The table does not have the same column definition as its partition."); } Query query = queryWrapper.query = pm.newQuery(MPartitionColumnStatistics.class); - String paramStr = "java.lang.String t1, java.lang.String t2"; - String filter = "tableName == t1 && dbName == t2 && ("; - Object[] params = new Object[colNames.size() + partNames.size() + 2]; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; + Object[] params = new Object[colNames.size() + partNames.size() + 3]; int i = 0; params[i++] = table.getTableName(); params[i++] = table.getDbName(); + params[i++] = table.isSetCatName() ? table.getCatName() : getDefaultCatalog(conf); int firstI = i; for (String s : partNames) { filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + i; @@ -8011,34 +8293,36 @@ public void flushCache() { } private void dropPartitionColumnStatisticsNoTxn( - String dbName, String tableName, List partNames) throws MetaException { + String catName, String dbName, String tableName, List partNames) throws MetaException { ObjectPair queryWithParams = makeQueryByPartitionNames( - dbName, tableName, partNames, MPartitionColumnStatistics.class, - "tableName", "dbName", "partition.partitionName"); + catName, dbName, tableName, partNames, MPartitionColumnStatistics.class, + "tableName", "dbName", "partition.partitionName", "catName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - List partVals, String colName) throws NoSuchObjectException, MetaException, - InvalidObjectException, InvalidInputException { + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, List partVals, + String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; dbName = org.apache.commons.lang.StringUtils.defaultString(dbName, Warehouse.DEFAULT_DATABASE_NAME); + catName = normalizeIdentifier(catName); if (tableName == null) { throw new InvalidInputException("Table name is null."); } try { openTransaction(); - MTable mTable = getMTable(dbName, tableName); + MTable mTable = getMTable(catName, dbName, tableName); MPartitionColumnStatistics mStatsObj; List mStatsObjColl; if (mTable == null) { throw new NoSuchObjectException("Table " + tableName + " for which stats deletion is requested doesn't exist"); } - MPartition mPartition = getMPartition(dbName, tableName, partVals); + MPartition mPartition = getMPartition(catName, dbName, tableName, partVals); if (mPartition == null) { throw new NoSuchObjectException("Partition " + partName + " for which stats deletion is requested doesn't exist"); @@ -8049,13 +8333,13 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, if (colName != null) { filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && " - + "colName == t4"; + + "colName == t4 && catName == t5"; parameters = "java.lang.String t1, java.lang.String t2, " - + "java.lang.String t3, 
java.lang.String t4"; + + "java.lang.String t3, java.lang.String t4, java.lang.String t5"; } else { - filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && catName == t4"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; } query.setFilter(filter); query.declareParameters(parameters); @@ -8065,25 +8349,28 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), normalizeIdentifier(dbName), normalizeIdentifier(tableName), - normalizeIdentifier(colName)); + normalizeIdentifier(colName), + normalizeIdentifier(catName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { pm.deletePersistent(mStatsObj); } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + " table=" - + tableName + " partition=" + partName + " col=" + colName); + throw new NoSuchObjectException("Column stats doesn't exist for table=" + + getCatalogQualifiedTableName(catName, dbName, tableName) + + " partition=" + partName + " col=" + colName); } } else { mStatsObjColl = - (List) query.execute(partName.trim(), + (List) query.executeWithArray(partName.trim(), normalizeIdentifier(dbName), - normalizeIdentifier(tableName)); + normalizeIdentifier(tableName), + normalizeIdentifier(catName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + " table=" - + tableName + " partition" + partName); + throw new NoSuchObjectException("Column stats don't exist for table=" + + getCatalogQualifiedTableName(catName, dbName, tableName) + " partition" + partName); } } ret = commitTransaction(); @@ -8097,7 +8384,8 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; @@ -8108,22 +8396,23 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri } try { openTransaction(); - MTable mTable = getMTable(dbName, tableName); + MTable mTable = getMTable(catName, dbName, tableName); MTableColumnStatistics mStatsObj; List mStatsObjColl; if (mTable == null) { - throw new NoSuchObjectException("Table " + tableName + throw new NoSuchObjectException("Table " + + getCatalogQualifiedTableName(catName, dbName, tableName) + " for which stats deletion is requested doesn't exist"); } query = pm.newQuery(MTableColumnStatistics.class); String filter; String parameters; if (colName != null) { - filter = "table.tableName == t1 && dbName == t2 && colName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + filter = "table.tableName == t1 && dbName == t2 && catName == t3 && colName == t4"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; } else { - filter = "table.tableName == t1 && dbName == t2"; - parameters = "java.lang.String t1, java.lang.String t2"; + filter = "table.tableName == t1 && 
dbName == t2 && catName == t3"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; } query.setFilter(filter); @@ -8131,8 +8420,9 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri if (colName != null) { query.setUnique(true); mStatsObj = - (MTableColumnStatistics) query.execute(normalizeIdentifier(tableName), + (MTableColumnStatistics) query.executeWithArray(normalizeIdentifier(tableName), normalizeIdentifier(dbName), + normalizeIdentifier(catName), normalizeIdentifier(colName)); pm.retrieve(mStatsObj); @@ -8146,7 +8436,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri mStatsObjColl = (List) query.execute( normalizeIdentifier(tableName), - normalizeIdentifier(dbName)); + normalizeIdentifier(dbName), + normalizeIdentifier(catName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -8530,10 +8821,11 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public boolean doesPartitionExist(String dbName, String tableName, List partVals) + public boolean doesPartitionExist(String catName, String dbName, String tableName, List + partVals) throws MetaException { try { - return this.getPartition(dbName, tableName, partVals) != null; + return this.getPartition(catName, dbName, tableName, partVals) != null; } catch (NoSuchObjectException e) { return false; } @@ -8545,14 +8837,15 @@ private void debugLog(String message) { } } - private static final int stackLimit = 5; + private static final int stackLimit = 3; private String getCallStack() { StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); int thislimit = Math.min(stackLimit, stackTrace.length); StringBuilder sb = new StringBuilder(); sb.append(" at:"); - for (int i = 4; i < thislimit; i++) { + // Offset by 4 because the first 4 frames are just calls to get down here. + for (int i = 4; i < thislimit + 4; i++) { sb.append("\n\t"); sb.append(stackTrace[i].toString()); } @@ -8572,6 +8865,7 @@ private Function convertToFunction(MFunction mfunc) { mfunc.getCreateTime(), FunctionType.findByValue(mfunc.getFunctionType()), convertToResourceUriList(mfunc.getResourceUris())); + func.setCatName(mfunc.getDatabase().getCatalogName()); return func; } @@ -8592,8 +8886,9 @@ private MFunction convertToMFunction(Function func) throws InvalidObjectExceptio } MDatabase mdb = null; + String catName = func.isSetCatName() ? func.getCatName() : getDefaultCatalog(conf); try { - mdb = getMDatabase(func.getDbName()); + mdb = getMDatabase(catName, func.getDbName()); } catch (NoSuchObjectException e) { LOG.error("Database does not exist", e); throw new InvalidObjectException("Database " + func.getDbName() + " doesn't exist."); @@ -8649,11 +8944,17 @@ public void createFunction(Function func) throws InvalidObjectException, MetaExc } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { boolean success = false; try { + String newFuncCat = newFunction.isSetCatName() ? 
newFunction.getCatName() : + getDefaultCatalog(conf); + if (!newFuncCat.equalsIgnoreCase(catName)) { + throw new InvalidObjectException("You cannot move a function between catalogs"); + } openTransaction(); + catName = normalizeIdentifier(catName); funcName = normalizeIdentifier(funcName); dbName = normalizeIdentifier(dbName); MFunction newf = convertToMFunction(newFunction); @@ -8661,7 +8962,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) throw new InvalidObjectException("new function is invalid"); } - MFunction oldf = getMFunction(dbName, funcName); + MFunction oldf = getMFunction(catName, dbName, funcName); if (oldf == null) { throw new MetaException("function " + funcName + " doesn't exist"); } @@ -8684,12 +8985,12 @@ public void alterFunction(String dbName, String funcName, Function newFunction) } @Override - public void dropFunction(String dbName, String funcName) throws MetaException, + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MFunction mfunc = getMFunction(dbName, funcName); + MFunction mfunc = getMFunction(catName, dbName, funcName); pm.retrieve(mfunc); if (mfunc != null) { // TODO: When function privileges are implemented, they should be deleted here. @@ -8703,7 +9004,7 @@ public void dropFunction(String dbName, String funcName) throws MetaException, } } - private MFunction getMFunction(String db, String function) { + private MFunction getMFunction(String catName, String db, String function) { MFunction mfunc = null; boolean commited = false; Query query = null; @@ -8711,10 +9012,11 @@ private MFunction getMFunction(String db, String function) { openTransaction(); db = normalizeIdentifier(db); function = normalizeIdentifier(function); - query = pm.newQuery(MFunction.class, "functionName == function && database.name == db"); - query.declareParameters("java.lang.String function, java.lang.String db"); + query = pm.newQuery(MFunction.class, + "functionName == function && database.name == db && database.catalogName == catName"); + query.declareParameters("java.lang.String function, java.lang.String db, java.lang.String catName"); query.setUnique(true); - mfunc = (MFunction) query.execute(function, db); + mfunc = (MFunction) query.execute(function, db, catName); pm.retrieve(mfunc); commited = commitTransaction(); } finally { @@ -8724,13 +9026,13 @@ private MFunction getMFunction(String db, String function) { } @Override - public Function getFunction(String dbName, String funcName) throws MetaException { + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { boolean commited = false; Function func = null; Query query = null; try { openTransaction(); - func = convertToFunction(getMFunction(dbName, funcName)); + func = convertToFunction(getMFunction(catName, dbName, funcName)); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -8739,13 +9041,15 @@ public Function getFunction(String dbName, String funcName) throws MetaException } @Override - public List getAllFunctions() throws MetaException { + public List getAllFunctions(String catName) throws MetaException { boolean commited = false; Query query = null; try { openTransaction(); - query = pm.newQuery(MFunction.class); - List allFunctions = (List) query.execute(); + catName = normalizeIdentifier(catName); + query = pm.newQuery(MFunction.class, 
"database.catalogName == catName"); + query.declareParameters("java.lang.String catName"); + List allFunctions = (List) query.execute(catName); pm.retrieveAll(allFunctions); commited = commitTransaction(); return convertToFunctions(allFunctions); @@ -8755,7 +9059,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException } @Override - public List getFunctions(String dbName, String pattern) throws MetaException { + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { boolean commited = false; Query query = null; List funcs = null; @@ -8767,6 +9071,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException List parameterVals = new ArrayList<>(); StringBuilder filterBuilder = new StringBuilder(); appendSimpleCondition(filterBuilder, "database.name", new String[] { dbName }, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if(pattern != null) { appendPatternCondition(filterBuilder, "functionName", pattern, parameterVals); } @@ -8980,11 +9285,13 @@ public NotificationEventsCountResponse getNotificationEventsCount(NotificationEv openTransaction(); long fromEventId = rqst.getFromEventId(); String inputDbName = rqst.getDbName(); + String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf); String queryStr = "select count(eventId) from " + MNotificationLog.class.getName() - + " where eventId > fromEventId && dbName == inputDbName"; + + " where eventId > fromEventId && dbName == inputDbName && catalogName == catName"; query = pm.newQuery(queryStr); - query.declareParameters("java.lang.Long fromEventId, java.lang.String inputDbName"); - result = (Long) query.execute(fromEventId, inputDbName); + query.declareParameters("java.lang.Long fromEventId, java.lang.String inputDbName," + + " java.lang.String catName"); + result = (Long) query.execute(fromEventId, inputDbName, catName); commited = commitTransaction(); return new NotificationEventsCountResponse(result.longValue()); } finally { @@ -8997,6 +9304,7 @@ private MNotificationLog translateThriftToDb(NotificationEvent entry) { dbEntry.setEventId(entry.getEventId()); dbEntry.setEventTime(entry.getEventTime()); dbEntry.setEventType(entry.getEventType()); + dbEntry.setCatalogName(entry.isSetCatName() ? 
entry.getCatName() : getDefaultCatalog(conf)); dbEntry.setDbName(entry.getDbName()); dbEntry.setTableName(entry.getTableName()); dbEntry.setMessage(entry.getMessage()); @@ -9009,6 +9317,7 @@ private NotificationEvent translateDbToThrift(MNotificationLog dbEvent) { event.setEventId(dbEvent.getEventId()); event.setEventTime(dbEvent.getEventTime()); event.setEventType(dbEvent.getEventType()); + event.setCatName(dbEvent.getCatalogName()); event.setDbName(dbEvent.getDbName()); event.setTableName(dbEvent.getTableName()); event.setMessage((dbEvent.getMessage())); @@ -9151,36 +9460,38 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN @Override - public List getPrimaryKeys(String db_name, String tbl_name) throws MetaException { + public List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException { try { - return getPrimaryKeysInternal(db_name, tbl_name, true, true); + return getPrimaryKeysInternal(catName, db_name, tbl_name); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getPrimaryKeysInternal(final String db_name_input, - final String tbl_name_input, - boolean allowSql, boolean allowJdo) + private List getPrimaryKeysInternal(final String catName, + final String db_name_input, + final String tbl_name_input) throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, true, true) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPrimaryKeys(db_name, tbl_name); + return directSql.getPrimaryKeys(catName, db_name, tbl_name); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPrimaryKeysViaJdo(db_name, tbl_name); + return getPrimaryKeysViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getPrimaryKeysViaJdo(String db_name, String tbl_name) throws MetaException { + private List getPrimaryKeysViaJdo(String catName, String db_name, String tbl_name) + throws MetaException { boolean commited = false; List primaryKeys = null; Query query = null; @@ -9188,9 +9499,11 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == cat_name &&" + " constraintType == MConstraint.PRIMARY_KEY_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, " + + "java.lang.String cat_name"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); primaryKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9201,11 +9514,13 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - primaryKeys.add(new SQLPrimaryKey(db_name, + SQLPrimaryKey keyCol = new SQLPrimaryKey(db_name, tbl_name, 
cols.get(currPK.getParentIntegerIndex()).getName(), currPK.getPosition(), - currPK.getConstraintName(), enable, validate, rely)); + currPK.getConstraintName(), enable, validate, rely); + keyCol.setCatName(catName); + primaryKeys.add(keyCol); } commited = commitTransaction(); } finally { @@ -9214,7 +9529,8 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN return primaryKeys; } - private String getPrimaryKeyConstraintName(String db_name, String tbl_name) throws MetaException { + private String getPrimaryKeyConstraintName(String catName, String db_name, String tbl_name) + throws MetaException { boolean commited = false; String ret = null; Query query = null; @@ -9223,9 +9539,11 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.PRIMARY_KEY_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, " + + "java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); @@ -9240,19 +9558,20 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { try { - return getForeignKeysInternal(parent_db_name, + return getForeignKeysInternal(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getForeignKeysInternal(final String parent_db_name_input, - final String parent_tbl_name_input, final String foreign_db_name_input, - final String foreign_tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + private List getForeignKeysInternal( + final String catName, final String parent_db_name_input, final String parent_tbl_name_input, + final String foreign_db_name_input, final String foreign_tbl_name_input, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { final String parent_db_name = parent_db_name_input; final String parent_tbl_name = parent_tbl_name_input; final String foreign_db_name = foreign_db_name_input; @@ -9267,24 +9586,24 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro db_name = foreign_db_name_input; tbl_name = foreign_tbl_name_input; } - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getForeignKeys(parent_db_name, + return directSql.getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return 
getForeignKeysViaJdo(parent_db_name, + return getForeignKeysViaJdo(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } }.run(false); } - private List getForeignKeysViaJdo(String parent_db_name, + private List getForeignKeysViaJdo(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { boolean commited = false; List foreignKeys = null; @@ -9293,23 +9612,24 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro Map tblToConstraint = new HashMap<>(); try { openTransaction(); - String queryText = (parent_tbl_name != null ? "parentTable.tableName == parent_tbl_name && " : "") + String queryText = " parentTable.database.catalogName == catName1 &&" + + "childTable.database.catalogName == catName2 && " + + (parent_tbl_name != null ? "parentTable.tableName == parent_tbl_name && " : "") + (parent_db_name != null ? " parentTable.database.name == parent_db_name && " : "") + (foreign_tbl_name != null ? " childTable.tableName == foreign_tbl_name && " : "") + (foreign_db_name != null ? " childTable.database.name == foreign_db_name && " : "") + " constraintType == MConstraint.FOREIGN_KEY_CONSTRAINT"; queryText = queryText.trim(); query = pm.newQuery(MConstraint.class, queryText); - String paramText = (parent_tbl_name == null ? "" : "java.lang.String parent_tbl_name,") - + (parent_db_name == null ? "" : " java.lang.String parent_db_name, ") - + (foreign_tbl_name == null ? "" : "java.lang.String foreign_tbl_name,") - + (foreign_db_name == null ? "" : " java.lang.String foreign_db_name"); - paramText=paramText.trim(); - if (paramText.endsWith(",")) { - paramText = paramText.substring(0, paramText.length()-1); - } + String paramText = "java.lang.String catName1, java.lang.String catName2" + + (parent_tbl_name == null ? "" : ", java.lang.String parent_tbl_name") + + (parent_db_name == null ? "" : " , java.lang.String parent_db_name") + + (foreign_tbl_name == null ? "" : ", java.lang.String foreign_tbl_name") + + (foreign_db_name == null ? 
"" : " , java.lang.String foreign_db_name"); query.declareParameters(paramText); List params = new ArrayList<>(); + params.add(catName); + params.add(catName); // This is not a mistake, catName is in the where clause twice if (parent_tbl_name != null) { params.add(parent_tbl_name); } @@ -9322,18 +9642,8 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro if (foreign_db_name != null) { params.add(foreign_db_name); } - if (params.size() == 0) { - constraints = (Collection) query.execute(); - } else if (params.size() ==1) { - constraints = (Collection) query.execute(params.get(0)); - } else if (params.size() == 2) { - constraints = (Collection) query.execute(params.get(0), params.get(1)); - } else if (params.size() == 3) { - constraints = (Collection) query.execute(params.get(0), params.get(1), params.get(2)); - } else { - constraints = (Collection) query.executeWithArray(params.get(0), params.get(1), - params.get(2), params.get(3)); - } + constraints = (Collection) query.executeWithArray(params.toArray(new String[params.size()])); + pm.retrieveAll(constraints); foreignKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9353,13 +9663,14 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro if (tblToConstraint.containsKey(consolidatedtblName)) { pkName = tblToConstraint.get(consolidatedtblName); } else { - pkName = getPrimaryKeyConstraintName(currPKFK.getParentTable().getDatabase().getName(), - currPKFK.getParentTable().getDatabase().getName()); + pkName = getPrimaryKeyConstraintName(currPKFK.getParentTable().getDatabase().getCatalogName(), + currPKFK.getParentTable().getDatabase().getName(), + currPKFK.getParentTable().getTableName()); tblToConstraint.put(consolidatedtblName, pkName); } - foreignKeys.add(new SQLForeignKey( - currPKFK.getParentTable().getDatabase().getName(), + SQLForeignKey fk = new SQLForeignKey( currPKFK.getParentTable().getDatabase().getName(), + currPKFK.getParentTable().getTableName(), parentCols.get(currPKFK.getParentIntegerIndex()).getName(), currPKFK.getChildTable().getDatabase().getName(), currPKFK.getChildTable().getTableName(), @@ -9367,7 +9678,9 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro currPKFK.getPosition(), currPKFK.getUpdateRule(), currPKFK.getDeleteRule(), - currPKFK.getConstraintName(), pkName, enable, validate, rely)); + currPKFK.getConstraintName(), pkName, enable, validate, rely); + fk.setCatName(catName); + foreignKeys.add(fk); } commited = commitTransaction(); } finally { @@ -9377,37 +9690,38 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getUniqueConstraintsInternal(db_name, tbl_name, true, true); + return getUniqueConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getUniqueConstraintsInternal(final String db_name_input, - final String tbl_name_input, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { + private List getUniqueConstraintsInternal( + String catNameInput, final String db_name_input, final String tbl_name_input, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + final String 
catName = normalizeIdentifier(catNameInput); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getUniqueConstraints(db_name, tbl_name); + return directSql.getUniqueConstraints(catName, db_name, tbl_name); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getUniqueConstraintsViaJdo(db_name, tbl_name); + return getUniqueConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getUniqueConstraintsViaJdo(String db_name, String tbl_name) + private List getUniqueConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List uniqueConstraints = null; @@ -9415,10 +9729,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro try { openTransaction(); query = pm.newQuery(MConstraint.class, - "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + "parentTable.tableName == tbl_name && parentTable.database.name == db_name && parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.UNIQUE_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); uniqueConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9429,7 +9743,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - uniqueConstraints.add(new SQLUniqueConstraint(db_name, + uniqueConstraints.add(new SQLUniqueConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getPosition(), @@ -9443,79 +9757,81 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getNotNullConstraintsInternal(db_name, tbl_name, true, true); + return getNotNullConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getDefaultConstraintsInternal(db_name, tbl_name, true, true); + return getDefaultConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } @Override - public List getCheckConstraints(String db_name, String tbl_name) + public List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException { 
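// --- Aside: a worked example, not part of this patch. The enableValidateRely
// int that the constraint readers above and below keep decoding packs three
// DDL flags into the low bits: 0x4 = ENABLE, 0x2 = VALIDATE, 0x1 = RELY. A
// stored value of 5 (binary 101) therefore decodes to enable=true,
// validate=false, rely=true. As a tiny standalone decoder:

final class ConstraintFlagsSketch {  // hypothetical helper, for illustration only
  final boolean enable;
  final boolean validate;
  final boolean rely;

  ConstraintFlagsSketch(int enableValidateRely) {
    enable = (enableValidateRely & 4) != 0;    // bit 2
    validate = (enableValidateRely & 2) != 0;  // bit 1
    rely = (enableValidateRely & 1) != 0;      // bit 0
  }
}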
try { - return getCheckConstraintsInternal(db_name, tbl_name, true, true); + return getCheckConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getDefaultConstraintsInternal(final String db_name_input, - final String tbl_name_input, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { + private List getDefaultConstraintsInternal( + String catName, final String db_name_input, final String tbl_name_input, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getDefaultConstraints(db_name, tbl_name); + return directSql.getDefaultConstraints(catName, db_name, tbl_name); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getDefaultConstraintsViaJdo(db_name, tbl_name); + return getDefaultConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - protected List getCheckConstraintsInternal(final String db_name_input, + protected List getCheckConstraintsInternal(String catName, final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(normalizeIdentifier(catName), db_name, tbl_name, + allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getCheckConstraints(db_name, tbl_name); + return directSql.getCheckConstraints(catName, db_name, tbl_name); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getCheckConstraintsViaJdo(db_name, tbl_name); + return getCheckConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getCheckConstraintsViaJdo(String db_name, String tbl_name) + private List getCheckConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List checkConstraints= null; @@ -9523,10 +9839,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro try { openTransaction(); query = pm.newQuery(MConstraint.class, - "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" - + " constraintType == MConstraint.CHECK_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == catName && constraintType == MConstraint.CHECK_CONSTRAINT"); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); checkConstraints = new ArrayList<>(); 
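// --- Aside: an illustrative sketch, not part of this patch. None of these
// constraint queries spells out an SQL join: JDOQL filters navigate object
// references, so the path parentTable.database.catalogName walks
// MConstraint -> MTable -> MDatabase and the JDO implementation derives the
// joins. The query built just above, reduced to a standalone form (model
// classes live in org.apache.hadoop.hive.metastore.model):

import java.util.Collection;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;
import org.apache.hadoop.hive.metastore.model.MConstraint;

class ConstraintLookupSketch {
  @SuppressWarnings("unchecked")
  static Collection<MConstraint> checkConstraints(PersistenceManager pm, String catName,
      String dbName, String tblName) {
    Query query = pm.newQuery(MConstraint.class,
        "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&"
            + " parentTable.database.catalogName == catName &&"
            + " constraintType == MConstraint.CHECK_CONSTRAINT");
    query.declareParameters(
        "java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName");
    // Up to three positional parameters may be passed to execute() directly.
    return (Collection<MConstraint>) query.execute(tblName, dbName, catName);
  }
}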
for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9537,8 +9853,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - checkConstraints.add(new SQLCheckConstraint(db_name, - tbl_name, + checkConstraints.add(new SQLCheckConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getDefaultOrCheckValue(), currConstraint.getConstraintName(), enable, validate, rely)); @@ -9555,7 +9870,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro return checkConstraints; } - private List getDefaultConstraintsViaJdo(String db_name, String tbl_name) + private List getDefaultConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List defaultConstraints= null; @@ -9564,9 +9879,11 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.DEFAULT_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters( + "java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); defaultConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9577,10 +9894,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - defaultConstraints.add(new SQLDefaultConstraint(db_name, - tbl_name, - cols.get(currConstraint.getParentIntegerIndex()).getName(), - currConstraint.getDefaultOrCheckValue(), currConstraint.getConstraintName(), enable, validate, rely)); + defaultConstraints.add(new SQLDefaultConstraint(catName, db_name, + tbl_name, + cols.get(currConstraint.getParentIntegerIndex()).getName(), + currConstraint.getDefaultOrCheckValue(), currConstraint.getConstraintName(), enable, validate, rely)); } commited = commitTransaction(); } finally { @@ -9594,28 +9911,29 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro return defaultConstraints; } - protected List getNotNullConstraintsInternal(final String db_name_input, + protected List getNotNullConstraintsInternal(String catName, final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getNotNullConstraints(db_name, tbl_name); + return directSql.getNotNullConstraints(catName, db_name, tbl_name); } @Override protected List 
getJdoResult(GetHelper ctx) throws MetaException, NoSuchObjectException { - return getNotNullConstraintsViaJdo(db_name, tbl_name); + return getNotNullConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getNotNullConstraintsViaJdo(String db_name, String tbl_name) + private List getNotNullConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List notNullConstraints = null; @@ -9624,9 +9942,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" - + " constraintType == MConstraint.NOT_NULL_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + + " parentTable.database.catalogName == catName && constraintType == MConstraint.NOT_NULL_CONSTRAINT"); + query.declareParameters( + "java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); notNullConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9637,7 +9956,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - notNullConstraints.add(new SQLNotNullConstraint(db_name, + notNullConstraints.add(new SQLNotNullConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getConstraintName(), enable, validate, rely)); @@ -9650,17 +9969,18 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) + throws NoSuchObjectException { boolean success = false; try { openTransaction(); - List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( - dbName, tableName, constraintName); + List tabConstraints = + listAllTableConstraintsWithOptionalConstraintName(catName, dbName, tableName, constraintName); if (CollectionUtils.isNotEmpty(tabConstraints)) { pm.deletePersistentAll(tabConstraints); - } else { + } else if (!missingOk) { throw new NoSuchObjectException("The constraint: " + constraintName + " does not exist for the associated table: " + dbName + "." + tableName); } @@ -9679,7 +9999,7 @@ public void createISchema(ISchema schema) throws AlreadyExistsException, MetaExc MISchema mSchema = convertToMISchema(schema); try { openTransaction(); - if (getMISchema(schema.getDbName(), schema.getName()) != null) { + if (getMISchema(schema.getCatName(), schema.getDbName(), schema.getName()) != null) { throw new AlreadyExistsException("Schema with name " + schema.getDbName() + "." 
+ schema.getName() + " already exists"); } @@ -9696,7 +10016,7 @@ public void alterISchema(ISchemaName schemaName, ISchema newSchema) boolean committed = false; try { openTransaction(); - MISchema oldMSchema = getMISchema(schemaName.getDbName(), schemaName.getSchemaName()); + MISchema oldMSchema = getMISchema(schemaName.getCatName(), schemaName.getDbName(), schemaName.getSchemaName()); if (oldMSchema == null) { throw new NoSuchObjectException("Schema " + schemaName + " does not exist"); } @@ -9718,7 +10038,8 @@ public ISchema getISchema(ISchemaName schemaName) throws MetaException { boolean committed = false; try { openTransaction(); - ISchema schema = convertToISchema(getMISchema(schemaName.getDbName(), schemaName.getSchemaName())); + ISchema schema = convertToISchema(getMISchema(schemaName.getCatName(), schemaName.getDbName(), + schemaName.getSchemaName())); committed = commitTransaction(); return schema; } finally { @@ -9726,15 +10047,18 @@ public ISchema getISchema(ISchemaName schemaName) throws MetaException { } } - private MISchema getMISchema(String dbName, String name) { + private MISchema getMISchema(String catName, String dbName, String name) { Query query = null; try { name = normalizeIdentifier(name); dbName = normalizeIdentifier(dbName); - query = pm.newQuery(MISchema.class, "name == schemaName && db.name == dbname"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbname"); + catName = normalizeIdentifier(catName); + query = pm.newQuery(MISchema.class, + "name == schemaName && db.name == dbname && db.catalogName == cat"); + query.declareParameters( + "java.lang.String schemaName, java.lang.String dbname, java.lang.String cat"); query.setUnique(true); - MISchema mSchema = (MISchema)query.execute(name, dbName); + MISchema mSchema = (MISchema)query.execute(name, dbName, catName); pm.retrieve(mSchema); return mSchema; } finally { @@ -9747,7 +10071,7 @@ public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, Me boolean committed = false; try { openTransaction(); - MISchema mSchema = getMISchema(schemaName.getDbName(), schemaName.getSchemaName()); + MISchema mSchema = getMISchema(schemaName.getCatName(), schemaName.getDbName(), schemaName.getSchemaName()); if (mSchema != null) { pm.deletePersistentAll(mSchema); } else { @@ -9767,13 +10091,14 @@ public void addSchemaVersion(SchemaVersion schemaVersion) try { openTransaction(); // Make sure it doesn't already exist - if (getMSchemaVersion(schemaVersion.getSchema().getDbName(), + if (getMSchemaVersion(schemaVersion.getSchema().getCatName(), schemaVersion.getSchema().getDbName(), schemaVersion.getSchema().getSchemaName(), schemaVersion.getVersion()) != null) { throw new AlreadyExistsException("Schema name " + schemaVersion.getSchema() + " version " + schemaVersion.getVersion() + " already exists"); } // Make sure the referenced Schema exists - if (getMISchema(schemaVersion.getSchema().getDbName(), schemaVersion.getSchema().getSchemaName()) == null) { + if (getMISchema(schemaVersion.getSchema().getCatName(), schemaVersion.getSchema().getDbName(), + schemaVersion.getSchema().getSchemaName()) == null) { throw new NoSuchObjectException("Schema " + schemaVersion.getSchema() + " does not exist"); } pm.makePersistent(mSchemaVersion); @@ -9789,8 +10114,8 @@ public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion ne boolean committed = false; try { openTransaction(); - MSchemaVersion oldMSchemaVersion = getMSchemaVersion(version.getSchema().getDbName(), - 
version.getSchema().getSchemaName(), version.getVersion()); + MSchemaVersion oldMSchemaVersion = getMSchemaVersion(version.getSchema().getCatName(), + version.getSchema().getDbName(), version.getSchema().getSchemaName(), version.getVersion()); if (oldMSchemaVersion == null) { throw new NoSuchObjectException("No schema version " + version + " exists"); } @@ -9809,9 +10134,9 @@ public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws Me boolean committed = false; try { openTransaction(); - SchemaVersion schemaVersion = - convertToSchemaVersion(getMSchemaVersion(version.getSchema().getDbName(), - version.getSchema().getSchemaName(), version.getVersion())); + SchemaVersion schemaVersion = convertToSchemaVersion(getMSchemaVersion( + version.getSchema().getCatName(), version.getSchema().getDbName(), + version.getSchema().getSchemaName(), version.getVersion())); committed = commitTransaction(); return schemaVersion; } finally { @@ -9819,17 +10144,19 @@ public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws Me } } - private MSchemaVersion getMSchemaVersion(String dbName, String schemaName, int version) { + private MSchemaVersion getMSchemaVersion(String catName, String dbName, String schemaName, int version) { Query query = null; try { dbName = normalizeIdentifier(dbName); schemaName = normalizeIdentifier(schemaName); query = pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName && version == schemaVersion"); - query.declareParameters( - "java.lang.String schemaName, java.lang.String dbName, java.lang.Integer schemaVersion"); + "iSchema.name == schemaName && iSchema.db.name == dbName &&" + + "iSchema.db.catalogName == cat && version == schemaVersion"); + query.declareParameters( "java.lang.String schemaName, java.lang.String dbName," + + "java.lang.String cat, java.lang.Integer schemaVersion"); query.setUnique(true); - MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(schemaName, dbName, version); + MSchemaVersion mSchemaVersion = + (MSchemaVersion)query.executeWithArray(schemaName, dbName, catName, version); pm.retrieve(mSchemaVersion); if (mSchemaVersion != null) { pm.retrieveAll(mSchemaVersion.getCols()); @@ -9849,13 +10176,15 @@ public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaE openTransaction(); String name = normalizeIdentifier(schemaName.getSchemaName()); String dbName = normalizeIdentifier(schemaName.getDbName()); + String catName = normalizeIdentifier(schemaName.getCatName()); query = pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbName"); + "iSchema.name == schemaName && iSchema.db.name == dbName && iSchema.db.catalogName == cat"); + query.declareParameters("java.lang.String schemaName, java.lang.String dbName, " + + "java.lang.String cat"); query.setUnique(true); query.setOrdering("version descending"); query.setRange(0, 1); - MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(name, dbName); + MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(name, dbName, catName); pm.retrieve(mSchemaVersion); if (mSchemaVersion != null) { pm.retrieveAll(mSchemaVersion.getCols()); @@ -9877,11 +10206,13 @@ public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaE openTransaction(); String name = normalizeIdentifier(schemaName.getSchemaName()); String dbName = normalizeIdentifier(schemaName.getDbName()); - query = 
pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbName"); + String catName = normalizeIdentifier(schemaName.getCatName()); + query = pm.newQuery(MSchemaVersion.class, "iSchema.name == schemaName &&" + + "iSchema.db.name == dbName && iSchema.db.catalogName == cat"); + query.declareParameters("java.lang.String schemaName, java.lang.String dbName," + + " java.lang.String cat"); query.setOrdering("version descending"); - List mSchemaVersions = query.setParameters(name, dbName).executeList(); + List mSchemaVersions = query.setParameters(name, dbName, catName).executeList(); pm.retrieveAll(mSchemaVersions); if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return null; List schemaVersions = new ArrayList<>(mSchemaVersions.size()); @@ -9958,7 +10289,8 @@ public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObje boolean committed = false; try { openTransaction(); - MSchemaVersion mSchemaVersion = getMSchemaVersion(version.getSchema().getDbName(), + MSchemaVersion mSchemaVersion = getMSchemaVersion(version.getSchema().getCatName(), + version.getSchema().getDbName(), version.getSchema().getSchemaName(), version.getVersion()); if (mSchemaVersion != null) { pm.deletePersistentAll(mSchemaVersion); @@ -10022,7 +10354,7 @@ public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaExcepti private MISchema convertToMISchema(ISchema schema) throws NoSuchObjectException { return new MISchema(schema.getSchemaType().getValue(), normalizeIdentifier(schema.getName()), - getMDatabase(schema.getDbName()), + getMDatabase(schema.getCatName(), schema.getDbName()), schema.getCompatibility().getValue(), schema.getValidationLevel().getValue(), schema.isCanEvolve(), @@ -10034,6 +10366,7 @@ private ISchema convertToISchema(MISchema mSchema) { if (mSchema == null) return null; ISchema schema = new ISchema(SchemaType.findByValue(mSchema.getSchemaType()), mSchema.getName(), + mSchema.getDb().getCatalogName(), mSchema.getDb().getName(), SchemaCompatibility.findByValue(mSchema.getCompatibility()), SchemaValidation.findByValue(mSchema.getValidationLevel()), @@ -10044,8 +10377,10 @@ private ISchema convertToISchema(MISchema mSchema) { } private MSchemaVersion convertToMSchemaVersion(SchemaVersion schemaVersion) throws MetaException { - return new MSchemaVersion(getMISchema(normalizeIdentifier(schemaVersion.getSchema().getDbName()), - normalizeIdentifier(schemaVersion.getSchema().getSchemaName())), + return new MSchemaVersion(getMISchema( + normalizeIdentifier(schemaVersion.getSchema().getCatName()), + normalizeIdentifier(schemaVersion.getSchema().getDbName()), + normalizeIdentifier(schemaVersion.getSchema().getSchemaName())), schemaVersion.getVersion(), schemaVersion.getCreatedAt(), createNewMColumnDescriptor(convertToMFieldSchemas(schemaVersion.getCols())), @@ -10060,8 +10395,8 @@ private MSchemaVersion convertToMSchemaVersion(SchemaVersion schemaVersion) thro private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) throws MetaException { if (mSchemaVersion == null) return null; SchemaVersion schemaVersion = new SchemaVersion( - new ISchemaName(mSchemaVersion.getiSchema().getDb().getName(), - mSchemaVersion.getiSchema().getName()), + new ISchemaName(mSchemaVersion.getiSchema().getDb().getCatalogName(), + mSchemaVersion.getiSchema().getDb().getName(), mSchemaVersion.getiSchema().getName()), mSchemaVersion.getVersion(), 
mSchemaVersion.getCreatedAt(), convertToFieldSchemas(mSchemaVersion.getCols().getCols())); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 4f07619802..af545d1018 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -36,7 +36,9 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -44,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -61,14 +64,10 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; @@ -82,8 +81,14 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMNullablePool; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -126,19 +131,107 @@ @CanNotRetry void rollbackTransaction(); + /** + * Create a new catalog. + * @param cat Catalog to create. 
+ * @throws MetaException if something goes wrong, usually in storing it to the database. + */ + void createCatalog(Catalog cat) throws MetaException; + + /** + * Alter an existing catalog. Only description and location can be changed, and the change of + * location is for internal use only. + * @param catName name of the catalog to alter. + * @param cat new version of the catalog. + * @throws MetaException something went wrong, usually in the database. + * @throws InvalidOperationException attempt to change something about the catalog that is not + * changeable, like the name. + */ + void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException; + + /** + * Get a catalog. + * @param catalogName name of the catalog. + * @return The catalog. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException if something goes wrong, usually in reading it from the database. + */ + Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException; + + /** + * Get all the catalogs. + * @return list of names of all catalogs in the system + * @throws MetaException if something goes wrong, usually in reading from the database. + */ + List getCatalogs() throws MetaException; + + /** + * Drop a catalog. The catalog must be empty. + * @param catalogName name of the catalog to drop. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException could mean the catalog isn't empty, could mean general database error. + */ + void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException; + + /** + * Create a database. + * @param db database to create. + * @throws InvalidObjectException not sure it actually ever throws this. + * @throws MetaException if something goes wrong, usually in writing it to the database. + */ void createDatabase(Database db) throws InvalidObjectException, MetaException; - Database getDatabase(String name) + /** + * Get a database. + * @param catalogName catalog the database is in. + * @param name name of the database. + * @return the database. + * @throws NoSuchObjectException if no such database exists. + */ + Database getDatabase(String catalogName, String name) throws NoSuchObjectException; - boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + /** + * Drop a database. + * @param catalogName catalog the database is in. + * @param dbname name of the database. + * @return true if the database was dropped, pretty much always returns this if it returns. + * @throws NoSuchObjectException no database in this catalog of this name to drop + * @throws MetaException something went wrong, usually with the database. + */ + boolean dropDatabase(String catalogName, String dbname) + throws NoSuchObjectException, MetaException; - boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + /** + * Alter a database. + * @param catalogName name of the catalog the database is in. + * @param dbname name of the database to alter + * @param db new version of the database. This should be complete as it will fully replace the + * existing db object. + * @return true if the change succeeds, could fail due to db constraint violations. + * @throws NoSuchObjectException no database of this name exists to alter. + * @throws MetaException something went wrong, usually with the database. 
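// Editorial sketch, not part of the patch: how a caller might exercise the new
// catalog CRUD surface on RawStore. Assumes `store` is an already-initialized
// RawStore implementation (e.g. ObjectStore); the catalog name, description,
// and location URI below are made up for illustration.
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

class CatalogCrudSketch {
  static void roundTrip(RawStore store)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    Catalog cat = new Catalog();                       // thrift-generated bean
    cat.setName("analytics");
    cat.setDescription("catalog for the analytics team");
    cat.setLocationUri("hdfs://nn:8020/warehouse/analytics");
    store.createCatalog(cat);

    Catalog fetched = store.getCatalog("analytics");   // throws if it does not exist
    fetched.setDescription("updated description");     // only description/location may change
    store.alterCatalog("analytics", fetched);

    List<String> names = store.getCatalogs();          // names of all catalogs
    store.dropCatalog("analytics");                    // catalog must be empty to drop
  }
}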
+ */ + boolean alterDatabase(String catalogName, String dbname, Database db) + throws NoSuchObjectException, MetaException; - List getDatabases(String pattern) throws MetaException; + /** + * Get all databases in a catalog whose names match a pattern. + * @param catalogName name of the catalog to search for databases in + * @param pattern pattern the database names should match + * @return list of matching database names. + * @throws MetaException something went wrong, usually with the database. + */ + List getDatabases(String catalogName, String pattern) throws MetaException; - List getAllDatabases() throws MetaException; + /** + * Get names of all the databases in a catalog. + * @param catalogName name of the catalog to search for databases in + * @return list of names of all databases in the catalog + * @throws MetaException something went wrong, usually with the database. + */ + List getAllDatabases(String catalogName) throws MetaException; boolean createType(Type type); @@ -149,53 +242,198 @@ Database getDatabase(String name) void createTable(Table tbl) throws InvalidObjectException, MetaException; - boolean dropTable(String dbName, String tableName) + /** + * Drop a table. + * @param catalogName catalog the table is in + * @param dbName database the table is in + * @param tableName table name + * @return true if the table was dropped + * @throws MetaException something went wrong, usually in the RDBMS or storage + * @throws NoSuchObjectException No table of this name + * @throws InvalidObjectException Don't think this is ever actually thrown + * @throws InvalidInputException Don't think this is ever actually thrown + */ + boolean dropTable(String catalogName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - Table getTable(String dbName, String tableName) - throws MetaException; + /** + * Get a table object. + * @param catalogName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @return table object, or null if no such table exists (wow it would be nice if we either + * consistently returned null or consistently threw NoSuchObjectException). + * @throws MetaException something went wrong in the RDBMS + */ + Table getTable(String catalogName, String dbName, String tableName) throws MetaException; + /** + * Add a partition. + * @param part partition to add + * @return true if the partition was successfully added. + * @throws InvalidObjectException the provided partition object is not valid. + * @throws MetaException error writing to the RDBMS. + */ boolean addPartition(Partition part) throws InvalidObjectException, MetaException; - boolean addPartitions(String dbName, String tblName, List parts) + /** + * Add a list of partitions to a table. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param parts list of partitions to be added. + * @return true if the operation succeeded. + * @throws InvalidObjectException never throws this AFAICT + * @throws MetaException the partitions don't belong to the indicated table or error writing to + * the RDBMS. + */ + boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; - boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) + /** + * Add a list of partitions to a table. + * @param catName catalog name. + * @param dbName database name.
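// Editorial sketch, not part of the patch: database lookups are now scoped by
// catalog, so the same database name can exist independently in two catalogs.
// Assumes `store` is a RawStore; catalog and database names are hypothetical.
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

class DatabaseLookupSketch {
  static void lookups(RawStore store) throws MetaException, NoSuchObjectException {
    List<String> matching = store.getDatabases("prod", "sales*"); // pattern match
    List<String> all = store.getAllDatabases("prod");             // every db in "prod"
    Database db = store.getDatabase("prod", "sales_eu");          // one specific db
  }
}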
+ * @param tblName table name. + * @param partitionSpec specification for the partition + * @param ifNotExists whether it is an error if the partition already exists. If true, then + * it is not an error if the partition exists; if false, it is. + * @return whether the partition was created. + * @throws InvalidObjectException The passed in partition spec or table specification is invalid. + * @throws MetaException error writing to RDBMS. + */ + boolean addPartitions(String catName, String dbName, String tblName, + PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - Partition getPartition(String dbName, String tableName, + /** + * Get a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals partition values for this table. + * @return the partition. + * @throws MetaException error reading from RDBMS. + * @throws NoSuchObjectException no partition matching this specification exists. + */ + Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - boolean doesPartitionExist(String dbName, String tableName, + /** + * Check whether a partition exists. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals list of partition values. + * @return true if the partition exists, false otherwise. + * @throws MetaException failure reading RDBMS + * @throws NoSuchObjectException this is never thrown. + */ + boolean doesPartitionExist(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - boolean dropPartition(String dbName, String tableName, + /** + * Drop a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals list of partition values. + * @return true if the partition was dropped. + * @throws MetaException Error accessing the RDBMS. + * @throws NoSuchObjectException no partition matching this description exists + * @throws InvalidObjectException error dropping the statistics for the partition + * @throws InvalidInputException error dropping the statistics for the partition + */ + boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - List getPartitions(String dbName, + /** + * Get some or all partitions for a table. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name + * @param max maximum number of partitions, or -1 to get all partitions. + * @return list of partitions + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such table exists + */ + List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException, NoSuchObjectException; - void alterTable(String dbname, String name, Table newTable) + /** + * Alter a table. + * @param catName catalog the table is in. + * @param dbname database the table is in. + * @param name name of the table. + * @param newTable New table object. Which parts of the table can be altered are + * implementation specific. + * @throws InvalidObjectException The new table object is invalid. + * @throws MetaException something went wrong, usually in the RDBMS or storage.
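// Editorial sketch, not part of the patch: the partition calls now take the
// catalog as the leading argument. Names are hypothetical; assumes a table
// "web.clicks" in catalog "hive" partitioned by (year, month).
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

class PartitionSketch {
  static void fetchAndDrop(RawStore store) throws TException {
    List<String> vals = Arrays.asList("2018", "03");   // values for (year, month)
    if (store.doesPartitionExist("hive", "web", "clicks", vals)) {
      Partition p = store.getPartition("hive", "web", "clicks", vals);
      store.dropPartition("hive", "web", "clicks", vals);
    }
  }
}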
+ */ + void alterTable(String catName, String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; - void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + /** + * Update creation metadata for a materialized view. + * @param catName catalog name. + * @param dbname database name. + * @param tablename table name. + * @param cm new creation metadata + * @throws MetaException error accessing the RDBMS. + */ + void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException; - List getTables(String dbName, String pattern) + /** + * Get table names that match a pattern. + * @param catName catalog to search in + * @param dbName database to search in + * @param pattern pattern to match + * @return list of table names, if any + * @throws MetaException failure in querying the RDBMS + */ + List getTables(String catName, String dbName, String pattern) throws MetaException; - List getTables(String dbName, String pattern, TableType tableType) + /** + * Get table names that match a pattern. + * @param catName catalog to search in + * @param dbName database to search in + * @param pattern pattern to match + * @param tableType type of table to look for + * @return list of table names, if any + * @throws MetaException failure in querying the RDBMS + */ + List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException; - List getMaterializedViewsForRewriting(String dbName) + /** + * Get list of materialized views in a database. + * @param catName catalog name + * @param dbName database name + * @return names of all materialized views in the database + * @throws MetaException error querying the RDBMS + * @throws NoSuchObjectException no such database + */ + List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException; - List getTableMeta( - String dbNames, String tableNames, List tableTypes) throws MetaException; + /** + * Get summary metadata for tables that match the given search arguments. + * @param catName catalog name to search in. Search must be confined to one catalog. + * @param dbNames databases to search in. + * @param tableNames names of tables to select. + * @param tableTypes types of tables to look for. + * @return list of matching table meta information. + * @throws MetaException failure in querying the RDBMS. + */ + List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException; /** + * @param catName catalog name * @param dbname * The name of the database from which to retrieve the tables * @param tableNames * @@ -203,15 +441,23 @@ void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm * @return A list of the tables retrievable from the database * whose names are in the list tableNames. * If there are duplicate names, only one instance of the table will be returned - * @throws MetaException + * @throws MetaException failure in querying the RDBMS. */ - List
getTableObjectsByName(String dbname, List tableNames) + List
getTableObjectsByName(String catName, String dbname, List tableNames) throws MetaException, UnknownDBException; - List getAllTables(String dbName) throws MetaException; + /** + * Get all tables in a database. + * @param catName catalog name. + * @param dbName database name. + * @return list of table names + * @throws MetaException failure in querying the RDBMS. + */ + List getAllTables(String catName, String dbName) throws MetaException; /** * Gets a list of tables based on a filter string and filter type. + * @param catName catalog name * @param dbName * The name of the database from which you will retrieve the table names * @param filter * @@ -222,46 +468,145 @@ void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm * @throws MetaException * @throws UnknownDBException */ - List listTableNamesByFilter(String dbName, - String filter, short max_tables) throws MetaException, UnknownDBException; + List listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException; - List listPartitionNames(String db_name, + /** + * Get a partial or complete list of names for partitions of a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param max_parts maximum number of partitions to retrieve, -1 for all. + * @return list of partition names. + * @throws MetaException there was an error accessing the RDBMS + */ + List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException; - PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + /** + * Get a list of partition values as one big struct. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param cols partition key columns + * @param applyDistinct whether to apply distinct to the list + * @param filter filter to apply to the partition names + * @param ascending whether to put in ascending order + * @param order ordering to apply to the results + * @param maxParts maximum number of parts to return, or -1 for all + * @return struct with all of the partition value information + * @throws MetaException error accessing the RDBMS + */ + PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException; - List listPartitionNamesByFilter(String db_name, - String tbl_name, String filter, short max_parts) throws MetaException; - - void alterPartition(String db_name, String tbl_name, List part_vals, + /** + * Alter a partition. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partition values that describe the partition. + * @param new_part new partition object. This should be a complete copy of the old with + * changed values, not just the parts to update. + * @throws InvalidObjectException No such partition. + * @throws MetaException error accessing the RDBMS. + */ + void alterPartition(String catName, String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; - void alterPartitions(String db_name, String tbl_name, + /** + * Alter a set of partitions. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals_list list of list of partition values.
Each outer list describes one + * partition (with its list of partition values). + * @param new_parts list of new partitions. The order must match the old partitions described in + * part_vals_list. Each of these should be a complete copy of the new + * partition, not just the pieces to update. + * @throws InvalidObjectException One of the indicated partitions does not exist. + * @throws MetaException error accessing the RDBMS. + */ + void alterPartitions(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts) throws InvalidObjectException, MetaException; + /** + * Get partitions with a filter. This is a portion of the SQL where clause. + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param filter SQL where clause filter + * @param maxParts maximum number of partitions to return, or -1 for all. + * @return list of partition objects matching the criteria + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException no such table. + */ List getPartitionsByFilter( - String dbName, String tblName, String filter, short maxParts) + String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; - boolean getPartitionsByExpr(String dbName, String tblName, + /** + * Get partitions using an already parsed expression. + * @param catName catalog name. + * @param dbName database name + * @param tblName table name + * @param expr an already parsed Hive expression + * @param defaultPartitionName default name of a partition + * @param maxParts maximum number of partitions to return, or -1 for all + * @param result list to place resulting partitions in + * @return true if the result contains unknown partitions. + * @throws TException error executing the expression + */ + boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException; - int getNumPartitionsByFilter(String dbName, String tblName, String filter) + /** + * Get the number of partitions that match a provided SQL filter. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param filter filter from Hive's SQL where clause + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or executing the filter + * @throws NoSuchObjectException no such table + */ + int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException; - int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException; + /** + * Get the number of partitions that match an already parsed expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param expr an already parsed Hive expression + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or working with the expression. + * @throws NoSuchObjectException no such table. + */ + int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + throws MetaException, NoSuchObjectException; - List getPartitionsByNames( - String dbName, String tblName, List partNames) + /** + * Get partitions by name. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. 
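// Editorial sketch, not part of the patch: alterPartitions requires that
// part_vals_list.get(i) identify the same partition that new_parts.get(i)
// replaces. Assumes `store` is a RawStore and `olds` holds the current
// partition objects; the Partition copy constructor is thrift-generated.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;

class AlterPartitionsSketch {
  static void touchAll(RawStore store, List<Partition> olds)
      throws InvalidObjectException, MetaException {
    List<List<String>> keys = new ArrayList<>();
    List<Partition> news = new ArrayList<>();
    for (Partition old : olds) {
      keys.add(old.getValues());              // identifies the partition to alter
      Partition updated = new Partition(old); // complete copy, then change pieces
      updated.putToParameters("touched", "true");
      news.add(updated);
    }
    store.alterPartitions("hive", "web", "clicks", keys, news);
  }
}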
+ * @param partNames list of partition names. These are names not values, so they will include + * both the key and the value. + * @return list of matching partitions + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException No such table. + */ + List getPartitionsByNames(String catName, String dbName, String tblName, + List partNames) throws MetaException, NoSuchObjectException; - Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; @@ -278,38 +623,132 @@ boolean revokeRole(Role role, String userName, PrincipalType principalType, PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + /** + * Get privileges for a database for a user. + * @param catName catalog name + * @param dbName database name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated database + * @throws InvalidObjectException no such database + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + /** + * Get privileges for a table for a user. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated table + * @throws InvalidObjectException no such table + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + /** + * Get privileges for a partition for a user. 
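// Editorial sketch, not part of the patch: privilege lookups are catalog-scoped
// too. Assumes `store` is a RawStore; user, group, and object names are made up.
import java.util.Arrays;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;

class PrivilegeSketch {
  static void lookups(RawStore store) throws InvalidObjectException, MetaException {
    PrincipalPrivilegeSet onDb =
        store.getDBPrivilegeSet("hive", "sales_eu", "ana", Arrays.asList("analysts"));
    PrincipalPrivilegeSet onTable =
        store.getTablePrivilegeSet("hive", "sales_eu", "orders", "ana",
            Arrays.asList("analysts"));
  }
}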
+ * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partition partition name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated partition + * @throws InvalidObjectException no such partition + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + /** + * Get privileges for a column in a table or partition for a user. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name, or null for table level column permissions + * @param columnName column name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated column in the table or partition + * @throws InvalidObjectException no such table, partition, or column + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; List listPrincipalGlobalGrants(String principalName, PrincipalType principalType); + /** + * For a given principal name and type, list the DB Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @return list of privileges for that principal on the specified database. + */ List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName); + PrincipalType principalType, String catName, String dbName); + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return list of privileges for that principal on the specified table. + */ List listAllTableGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName); + /** + * For a given principal name and type, list the Partition Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partName partition name (not value) + * @return list of privileges for that principal on the specified partition. + */ List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName); + /** + * For a given principal name and type, list the Table Column Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param columnName column name + * @return list of privileges for that principal on the specified table column.
+ */ List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName); + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partName partition name (not value) + * @param columnName column name + * @return list of privileges for that principal on the specified database. + */ List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName, String columnName); boolean grantPrivileges (PrivilegeBag privileges) @@ -338,16 +777,44 @@ boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) List listRoleMembers(String roleName); - Partition getPartitionWithAuth(String dbName, String tblName, + /** + * Fetch a partition along with privilege information for a particular user. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partVals partition values + * @param user_name user to get privilege information for. + * @param group_names groups to get privilege information for. + * @return a partition + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error fetching privilege information + */ + Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; - List getPartitionsWithAuth(String dbName, + /** + * Fetch some or all partitions for a table, along with privilege information for a particular + * user. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param maxParts maximum number of partitions to fetch, -1 for all partitions. + * @param userName user to get privilege information for. + * @param groupNames groups to get privilege information for. + * @return list of partitions. + * @throws MetaException error access the RDBMS. + * @throws NoSuchObjectException no such table exists + * @throws InvalidObjectException error fetching privilege information. + */ + List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; /** * Lists partition names that match a given partial specification + * @param catName catalog name. * @param db_name * The name of the database which has the partitions * @param tbl_name @@ -358,16 +825,17 @@ Partition getPartitionWithAuth(String dbName, String tblName, * @param max_parts * The maximum number of partitions to return * @return A list of partition names that match the partial spec. 
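// Editorial sketch, not part of the patch: a partial specification fixes a
// prefix of the partition keys. For a table partitioned by (year, month, day),
// this lists every partition name under year=2018/month=03. Assumes `store`
// is a RawStore; -1 means "no limit".
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

class PartialSpecSketch {
  static List<String> namesForMonth(RawStore store)
      throws MetaException, NoSuchObjectException {
    List<String> partial = Arrays.asList("2018", "03"); // day left unspecified
    return store.listPartitionNamesPs("hive", "web", "clicks", partial, (short) -1);
  }
}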
- * @throws MetaException - * @throws NoSuchObjectException + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException No such table exists */ - List listPartitionNamesPs(String db_name, String tbl_name, + List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException; /** * Lists partitions that match a given partial specification and sets their auth privileges. * If userName and groupNames null, then no auth privileges are set. + * @param catName catalog name. * @param db_name * The name of the database which has the partitions * @param tbl_name * @@ -382,34 +850,33 @@ Partition getPartitionWithAuth(String dbName, String tblName, * @param userName * The user name for the partition for authentication privileges * @param groupNames * The groupNames for the partition for authentication privileges * @return A list of partitions that match the partial spec. - * @throws MetaException - * @throws NoSuchObjectException - * @throws InvalidObjectException + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException No such table exists + * @throws InvalidObjectException error accessing privilege information */ - List listPartitionsPsWithAuth(String db_name, String tbl_name, + List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore * @param colStats object to persist * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException the stats object is invalid + * @throws InvalidInputException unable to record the stats for the table */ boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** Persists the given column statistics object to the metastore - * @param partVals - * * @param statsObj object to persist + * @param partVals partition values to persist the stats for * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException the stats object is invalid + * @throws InvalidInputException unable to record the stats for the table */ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) @@ -418,64 +885,67 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, /** * Returns the relevant column statistics for a given column in a given table in a given database * if such statistics exist. - * + * @param catName catalog name.
* @param dbName name of the database, defaults to current database * @param tableName name of the table * @param colName names of the columns for which statistics are requested * @return Relevant column statistics for the column for the given table - * @throws NoSuchObjectException - * @throws MetaException + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS * */ - ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; /** - * Returns the relevant column statistics for given columns in given partitions in a given - * table in a given database if such statistics exist. + * Get statistics for a partition for a set of columns. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] + * @param colNames list of columns to get stats for + * @return list of statistics objects + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such partition. */ List getPartitionColumnStatistics( - String dbName, String tblName, List partNames, List colNames) + String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** * Deletes column statistics if present associated with a given db, table, partition and col. If * null is passed instead of a colName, stats when present for all columns associated * with a given db, table and partition are deleted. - * - * @param dbName - * @param tableName - * @param partName - * @param partVals - * @param colName + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param partVals partition values. + * @param colName column name. * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws InvalidInputException bad input, such as null table or database name. */ - - boolean deletePartitionColumnStatistics(String dbName, String tableName, + boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** - * Deletes column statistics if present associated with a given db, table and col. If - * null is passed instead of a colName, stats when present for all columns associated - * with a given db and table are deleted. - * - * @param dbName - * @param tableName - * @param colName - * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * Delete statistics for a single column or all columns in a table. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colName column name. Null to delete stats for all columns in the table. + * @return true if the statistics were deleted.
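// Editorial sketch, not part of the patch: statistics lookups take the catalog
// first, and the partition-level call takes partition *names* in key=value
// form, not value lists. Assumes `store` is a RawStore; names are made up.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

class ColumnStatsSketch {
  static void fetch(RawStore store) throws MetaException, NoSuchObjectException {
    ColumnStatistics tableStats =
        store.getTableColumnStatistics("hive", "web", "clicks",
            Arrays.asList("ip", "user_agent"));
    List<ColumnStatistics> partStats =
        store.getPartitionColumnStatistics("hive", "web", "clicks",
            Arrays.asList("year=2018/month=03"),   // partition name, not values
            Arrays.asList("ip"));
  }
}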
+ * @throws NoSuchObjectException no such table or column. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException error dropping the stats + * @throws InvalidInputException bad inputs, such as null table name. */ - - boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) + boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; long cleanupEvents(); @@ -503,100 +973,203 @@ void updateMasterKey(Integer seqNo, String key) abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; - void dropPartitions(String dbName, String tblName, List partNames) + /** + * Drop a list of partitions. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name + * @param partNames list of partition names. + * @throws MetaException error accessing RDBMS or storage. + * @throws NoSuchObjectException One or more of the partitions does not exist. + */ + void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; + /** + * List all DB grants for a given principal. + * @param principalName principal name + * @param principalType type + * @return all DB grants for this principal + */ List listPrincipalDBGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Table grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table grants for this principal + */ List listPrincipalTableGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Partition grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition grants for this principal + */ List listPrincipalPartitionGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Table column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table column grants for this principal + */ List listPrincipalTableColumnGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Partition column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition column grants for this principal + */ List listPrincipalPartitionColumnGrantsAll( String principalName, PrincipalType principalType); List listGlobalGrantsAll(); - List listDBGrantsAll(String dbName); + /** + * Find all the privileges for a given database. + * @param catName catalog name + * @param dbName database name + * @return list of all privileges. + */ + List listDBGrantsAll(String catName, String dbName); + /** + * Find all of the privileges for a given column in a given partition.
+ * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name (not value) + * @param columnName column name + * @return all privileges on this column in this partition + */ List listPartitionColumnGrantsAll( - String dbName, String tableName, String partitionName, String columnName); + String catName, String dbName, String tableName, String partitionName, String columnName); - List listTableGrantsAll(String dbName, String tableName); + /** + * Find all of the privileges for a given table + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return all privileges on this table + */ + List listTableGrantsAll(String catName, String dbName, String tableName); + /** + * Find all of the privileges for a given partition. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name (not value) + * @return all privileges on this partition + */ List listPartitionGrantsAll( - String dbName, String tableName, String partitionName); + String catName, String dbName, String tableName, String partitionName); + /** + * Find all of the privileges for a given column in a given table. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param columnName column name + * @return all privileges on this column in this table + */ List listTableColumnGrantsAll( - String dbName, String tableName, String columnName); + String catName, String dbName, String tableName, String columnName); /** * Register a user-defined function based on the function specification passed in. - * @param func - * @throws InvalidObjectException - * @throws MetaException + * @param func function to create + * @throws InvalidObjectException incorrectly specified function + * @throws MetaException error accessing the RDBMS */ void createFunction(Function func) throws InvalidObjectException, MetaException; /** * Alter function based on new function specs. - * @param dbName - * @param funcName - * @param newFunction - * @throws InvalidObjectException - * @throws MetaException + * @param catName catalog the function is in + * @param dbName database name + * @param funcName function name + * @param newFunction new function specification + * @throws InvalidObjectException no such function, or incorrectly specified new function + * @throws MetaException error accessing the RDBMS */ - void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; /** * Drop a function definition. - * @param dbName - * @param funcName - * @throws MetaException - * @throws NoSuchObjectException - * @throws InvalidObjectException - * @throws InvalidInputException + * @param catName catalog the function is in + * @param dbName database name + * @param funcName function name + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException not sure when this is thrown + * @throws InvalidInputException not sure when this is thrown */ - void dropFunction(String dbName, String funcName) + void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; /** * Retrieve function by name.
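// Editorial sketch, not part of the patch: function lookups are scoped to a
// catalog, and getAllFunctions now walks a single catalog rather than the whole
// metastore. Assumes `store` is a RawStore; names are made up.
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.MetaException;

class FunctionLookupSketch {
  static void lookups(RawStore store) throws MetaException {
    Function f = store.getFunction("hive", "sales_eu", "my_udf");
    List<String> matching = store.getFunctions("hive", "sales_eu", "my_*");
    List<Function> everything = store.getAllFunctions("hive");
  }
}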
- * @param dbName - * @param funcName - * @return - * @throws MetaException + * @param catName catalog the function is in + * @param dbName database name + * @param funcName function name + * @return the function + * @throws MetaException error accessing the RDBMS */ - Function getFunction(String dbName, String funcName) throws MetaException; + Function getFunction(String catName, String dbName, String funcName) throws MetaException; /** * Retrieve all functions. - * @return - * @throws MetaException + * @return all functions in a catalog + * @throws MetaException error accessing the RDBMS */ - List getAllFunctions() throws MetaException; + List getAllFunctions(String catName) throws MetaException; /** * Retrieve list of function names based on name pattern. - * @param dbName - * @param pattern - * @return - * @throws MetaException + * @param catName catalog the functions are in + * @param dbName database name + * @param pattern pattern to match + * @return functions that match the pattern + * @throws MetaException error accessing the RDBMS */ - List getFunctions(String dbName, String pattern) throws MetaException; + List getFunctions(String catName, String dbName, String pattern) throws MetaException; - AggrStats get_aggr_stats_for(String dbName, String tblName, + /** + * Get aggregated stats for a table or partition(s). + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are the names of the partitions, not + * values. + * @param colNames list of column names + * @return aggregated stats + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such table or partition + */ + AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** + * Get column stats for all partitions of all tables in the database + * @param catName catalog name + * @param dbName database name + * @return List of column stats objects for all partitions of all tables in the database + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such database + */ + List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException; + + /** * Get the next notification event. * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId @@ -629,7 +1202,7 @@ AggrStats get_aggr_stats_for(String dbName, String tblName, * This is intended for use by the repl commands to track the progress of incremental dump. * @return */ - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); + NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); /* * Flush any catalog objects held by the metastore implementation. Note that this does not * @@ -695,12 +1268,22 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] @InterfaceStability.Evolving int getDatabaseCount() throws MetaException; - List getPrimaryKeys(String db_name, - String tbl_name) throws MetaException; + /** + * Get the primary key associated with a table. Strangely enough each SQLPrimaryKey is actually a + * column in the key, not the key itself. Thus the list.
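// Editorial sketch, not part of the patch: since each SQLPrimaryKey element is
// one column of the key, a composite key is reassembled by sorting on KEY_SEQ.
// Assumes `store` is a RawStore and the thrift getters getKey_seq()/getColumn_name().
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;

class PrimaryKeySketch {
  static List<String> keyColumns(RawStore store) throws MetaException {
    List<SQLPrimaryKey> cols =
        new ArrayList<>(store.getPrimaryKeys("hive", "sales_eu", "orders"));
    cols.sort(Comparator.comparingInt(SQLPrimaryKey::getKey_seq));
    List<String> names = new ArrayList<>();
    for (SQLPrimaryKey col : cols) {
      names.add(col.getColumn_name());   // one entry per key column, in order
    }
    return names;
  }
}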
- List getPrimaryKeys(String db_name, - String tbl_name) throws MetaException; + /** + * Get the primary key associated with a table. Strangely enough each SQLPrimaryKey is actually a + * column in the key, not the key itself. Thus the list. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @return list of primary key columns or an empty list if the table does not have a primary key + * @throws MetaException error accessing the RDBMS + */ + List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException; /** * Get the foreign keys for a table. All foreign keys for a particular table can be fetched by * passing null for the last two arguments. + * @param catName catalog name. * @param parent_db_name Database the table referred to is in. This can be null to match all * databases. * @param parent_tbl_name Table that is referred to. This can be null to match all tables. @@ -708,43 +1291,156 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] * @param foreign_tbl_name Table with the foreign key. * @return List of all matching foreign key columns. Note that if more than one foreign key * matches the arguments the results here will be all mixed together into a single list. - * @throws MetaException if something goes wrong. + * @throws MetaException error accessing the RDBMS. */ - List getForeignKeys(String parent_db_name, + List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException; - List getUniqueConstraints(String db_name, + /** + * Get unique constraints associated with a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @return list of unique constraints + * @throws MetaException error accessing the RDBMS. + */ + List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException; - List getNotNullConstraints(String db_name, + /** + * Get not null constraints on a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @return list of not null constraints + * @throws MetaException error accessing the RDBMS. + */ + List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException; - List getDefaultConstraints(String db_name, + /** + * Get default values for columns in a table. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @return list of default values defined on the table. + * @throws MetaException error accessing the RDBMS + */ + List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException; - List getCheckConstraints(String db_name, + /** + * Get check constraints for columns in a table. + * @param catName catalog name. + * @param db_name database name + * @param tbl_name table name + * @return check constraints for this table + * @throws MetaException error accessing the RDBMS + */ + List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException;
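A sketch of reading every constraint type for one table through the catalog-qualified getters above (not part of the patch; generic types restored, names invented):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
    import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
    import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
    import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;

    static void dumpConstraints(RawStore store) throws Exception {
      // One SQLPrimaryKey per key column, hence a list even for a single key.
      List<SQLPrimaryKey> pk = store.getPrimaryKeys("hive", "default", "orders");
      // Nulls on the parent side match all referenced databases/tables.
      List<SQLForeignKey> fk = store.getForeignKeys("hive", null, null, "default", "orders");
      List<SQLUniqueConstraint> uq = store.getUniqueConstraints("hive", "default", "orders");
      List<SQLNotNullConstraint> nn = store.getNotNullConstraints("hive", "default", "orders");
      List<SQLDefaultConstraint> dv = store.getDefaultConstraints("hive", "default", "orders");
      List<SQLCheckConstraint> cc = store.getCheckConstraints("hive", "default", "orders");
    }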
+ /** + * Create a table with constraints + * @param tbl table definition + * @param primaryKeys primary key definition, or null + * @param foreignKeys foreign key definition, or null + * @param uniqueConstraints unique constraints definition, or null + * @param notNullConstraints not null constraints definition, or null + * @param defaultConstraints default values definition, or null + * @param checkConstraints check constraints definition, or null + * @return list of constraint names + * @throws InvalidObjectException one of the provided objects is malformed. + * @throws MetaException error accessing the RDBMS + */ List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints, List checkConstraints) throws InvalidObjectException, MetaException; - void dropConstraint(String dbName, String tableName, String constraintName) throws NoSuchObjectException; + /** + * Drop a constraint, any constraint. I have no idea why add and get each have separate + * methods for each constraint type but drop has only one. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws NoSuchObjectException no constraint of this name exists + */ + default void dropConstraint(String catName, String dbName, String tableName, + String constraintName) throws NoSuchObjectException { + dropConstraint(catName, dbName, tableName, constraintName, false); + } + + /** + * Drop a constraint, any constraint. I have no idea why add and get each have separate + * methods for each constraint type but drop has only one. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @param missingOk if true, it is not an error if there is no constraint of this name. If + * false and there is no constraint of this name an exception will be thrown. + * @throws NoSuchObjectException no constraint of this name exists and missingOk = false + */ + void dropConstraint(String catName, String dbName, String tableName, String constraintName, + boolean missingOk) throws NoSuchObjectException; + /** + * Add a primary key to a table. + * @param pks Columns in the primary key. + * @return the name of the constraint, as a list of strings. + * @throws InvalidObjectException The SQLPrimaryKeys list is malformed + * @throws MetaException error accessing the RDBMS + */ List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; + /** + * Add a foreign key to a table. + * @param fks foreign key specification + * @return foreign key name. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addForeignKeys(List fks) throws InvalidObjectException, MetaException; + /** + * Add unique constraints to a table. + * @param uks unique constraints specification + * @return unique constraint names. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException; + /** + * Add not null constraints to a table. + * @param nns not null constraint specifications + * @return constraint names. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException;
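Caller-side, the constraint mutators above compose as follows; a sketch (not part of the patch; names invented, generic types restored):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;

    // Add a two-column primary key, then drop it by its returned name.
    List<SQLPrimaryKey> pkCols = Arrays.asList(pkCol1, pkCol2); // pre-built key columns
    List<String> names = store.addPrimaryKeys(pkCols);
    // The four-argument dropConstraint forwards with missingOk = false,
    // so dropping a missing constraint raises NoSuchObjectException;
    // pass true as a fifth argument to tolerate absence.
    store.dropConstraint("hive", "default", "orders", names.get(0));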
- List addDefaultConstraints(List nns) throws InvalidObjectException, MetaException; + /** + * Add default values to a table definition + * @param dv list of default values + * @return constraint names + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ + List addDefaultConstraints(List dv) + throws InvalidObjectException, MetaException; - List addCheckConstraints(List nns) throws InvalidObjectException, MetaException; + /** + * Add check constraints to a table + * @param cc check constraints to add + * @return list of constraint names + * @throws InvalidObjectException the specification is malformed + * @throws MetaException error accessing the RDBMS + */ + List addCheckConstraints(List cc) throws InvalidObjectException, MetaException; /** * Gets the unique id of the backing datastore for the metadata diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 08ea67fc2f..0074e0142c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hive.metastore.model.MTable; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + /** * This class contains conversion logic that creates Thrift stat objects from * JDO stat objects and plain arrays from DirectSQL. @@ -65,6 +67,7 @@ public static MTableColumnStatistics convertToMTableColumnStatistics(MTable tabl MTableColumnStatistics mColStats = new MTableColumnStatistics(); mColStats.setTable(table); mColStats.setDbName(statsDesc.getDbName()); + mColStats.setCatName(statsDesc.isSetCatName() ? statsDesc.getCatName() : DEFAULT_CATALOG_NAME); mColStats.setTableName(statsDesc.getTableName()); mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); mColStats.setColName(statsObj.getColName()); @@ -311,6 +314,7 @@ public static ColumnStatisticsDesc getTableColumnStatisticsDesc( MTableColumnStatistics mStatsObj) { ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); statsDesc.setIsTblLevel(true); + statsDesc.setCatName(mStatsObj.getCatName()); statsDesc.setDbName(mStatsObj.getDbName()); statsDesc.setTableName(mStatsObj.getTableName()); statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); @@ -326,6 +330,7 @@ public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( MPartitionColumnStatistics mColStats = new MPartitionColumnStatistics(); mColStats.setPartition(partition); + mColStats.setCatName(statsDesc.isSetCatName() ? 
statsDesc.getCatName() : DEFAULT_CATALOG_NAME); mColStats.setDbName(statsDesc.getDbName()); mColStats.setTableName(statsDesc.getTableName()); mColStats.setPartitionName(statsDesc.getPartName()); @@ -474,6 +479,7 @@ public static ColumnStatisticsDesc getPartitionColumnStatisticsDesc( MPartitionColumnStatistics mStatsObj) { ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); statsDesc.setIsTblLevel(false); + statsDesc.setCatName(mStatsObj.getCatName()); statsDesc.setDbName(mStatsObj.getDbName()); statsDesc.setTableName(mStatsObj.getTableName()); statsDesc.setPartName(mStatsObj.getPartitionName()); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index f977f14005..80dfd301c7 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -423,8 +423,8 @@ private void validateTableStructure(IHMSHandler hmsHandler, Table table) try { Warehouse wh = hmsHandler.getWh(); if (table.getSd().getLocation() == null || table.getSd().getLocation().isEmpty()) { - tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase(table.getDbName()), - table.getTableName()); + tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase( + MetaStoreUtils.getDefaultCatalog(getConf()), table.getDbName()), table.getTableName()); } else { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java index d4a08195a9..88cbfcdc4b 100755 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -31,6 +31,7 @@ import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.FileUtils; @@ -58,10 +59,13 @@ * This class represents a warehouse where data of Hive tables is stored */ public class Warehouse { + public static final String DEFAULT_CATALOG_NAME = "hive"; + public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive"; public static final String DEFAULT_DATABASE_NAME = "default"; public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + private static final String CAT_DB_TABLE_SEPARATOR = "."; private Path whRoot; private final Configuration conf; @@ -154,14 +158,59 @@ public Path getWhRoot() throws MetaException { return whRoot; } + /** + * Build the database path based on catalog name and database name. This should only be used + * when a database is being created or altered. If you just want to find out the path a + * database is already using, call {@link #getDatabasePath(Database)}. If the passed in + * database already has a path set, that will be used. If not, the location will be built using + * the catalog's path and the database name. 
+ * @param cat catalog the database is in + * @param db database object + * @return Path representing the directory for the database + * @throws MetaException when the file path cannot be properly determined from the configured + * file system. + */ + public Path determineDatabasePath(Catalog cat, Database db) throws MetaException { + if (db.isSetLocationUri()) { + return getDnsPath(new Path(db.getLocationUri())); + } + if (cat == null || cat.getName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) { + if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + return getWhRoot(); + } else { + return new Path(getWhRoot(), dbDirFromDbName(db)); + } + } else { + return new Path(getDnsPath(new Path(cat.getLocationUri())), dbDirFromDbName(db)); + } + } + + private String dbDirFromDbName(Database db) throws MetaException { + return db.getName().toLowerCase() + DATABASE_WAREHOUSE_SUFFIX; + } + + /** + * Get the path specified by the database. In the case of the default database the root of the + * warehouse is returned. + * @param db database to get the path of + * @return path to the database directory + * @throws MetaException when the file path cannot be properly determined from the configured + * file system. + */ public Path getDatabasePath(Database db) throws MetaException { - if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME) && + db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } return new Path(db.getLocationUri()); } public Path getDefaultDatabasePath(String dbName) throws MetaException { + // TODO CAT - I am fairly certain that most calls to this are in error. This should only be + // used when the database location is unset, which should never happen except when a + // new database is being created. Once I have confirmation of this, change calls of this to + // getDatabasePath(), since it does the right thing. Also, merge this with + // determineDatabasePath() as it duplicates much of the logic. if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } @@ -177,7 +226,8 @@ public Path getDefaultDatabasePath(String dbName) throws MetaException { */ public Path getDefaultTablePath(Database db, String tableName) throws MetaException { - return getDnsPath(new Path(getDatabasePath(db), MetaStoreUtils.encodeTableName(tableName.toLowerCase()))); + return getDnsPath(new Path(getDatabasePath(db), + MetaStoreUtils.encodeTableName(tableName.toLowerCase()))); } public static String getQualifiedName(Table table) { @@ -185,13 +235,37 @@ public static String getQualifiedName(Table table) { } public static String getQualifiedName(String dbName, String tableName) { - return dbName + "." + tableName; + return dbName + CAT_DB_TABLE_SEPARATOR + tableName; } public static String getQualifiedName(Partition partition) { return partition.getDbName() + "." + partition.getTableName() + partition.getValues(); } + /** + * Get table name in cat.db.table format. + * @param table table object + * @return fully qualified name. + */ + public static String getCatalogQualifiedTableName(Table table) { + return getCatalogQualifiedTableName(table.getCatName(), table.getDbName(), table.getTableName()); + } +
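Concretely, determineDatabasePath() above resolves to locations like these, assuming a warehouse root of /warehouse and a catalog "mycat" whose locationUri is /cats/mycat (paths invented for the example):

    // db.locationUri already set    -> that URI, DNS-qualified, regardless of catalog
    // catalog "hive", db "default"  -> /warehouse          (the warehouse root itself)
    // catalog "hive", db "sales"    -> /warehouse/sales.db
    // catalog "mycat", db "sales"   -> /cats/mycat/sales.db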
+ /** + * Get table name in cat.db.table format. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return fully qualified name. + */ + public static String getCatalogQualifiedTableName(String catName, String dbName, String tableName) { + return catName + CAT_DB_TABLE_SEPARATOR + dbName + CAT_DB_TABLE_SEPARATOR + tableName; + } + + public static String getCatalogQualifiedDbName(String catName, String dbName) { + return catName + CAT_DB_TABLE_SEPARATOR + dbName; + } + public boolean mkdirs(Path f) throws MetaException { FileSystem fs; try { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index 97d8af6310..944c81313a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -32,24 +32,18 @@ public class CacheUtils { private static final String delimit = "\u0001"; - /** - * Builds a key for the table cache which is concatenation of database name and table name - * separated by a delimiter - * - * @param dbName - * @param tableName - * @return - */ - public static String buildTableCacheKey(String dbName, String tableName) { - return dbName + delimit + tableName; + public static String buildCatalogKey(String catName) { + return catName; + } + + public static String buildDbKey(String catName, String dbName) { + return buildKey(catName.toLowerCase(), dbName.toLowerCase()); } /** * Builds a key for the partition cache which is concatenation of partition values, each value * separated by a delimiter * - * @param list of partition values - * @return cache key for partitions cache */ public static String buildPartitionCacheKey(List partVals) { if (partVals == null || partVals.isEmpty()) { @@ -58,13 +52,29 @@ public static String buildPartitionCacheKey(List partVals) { return String.join(delimit, partVals); } + public static String buildTableKey(String catName, String dbName, String tableName) { + return buildKey(catName.toLowerCase(), dbName.toLowerCase(), tableName.toLowerCase()); + } + + public static String buildTableColKey(String catName, String dbName, String tableName, + String colName) { + return buildKey(catName, dbName, tableName, colName); + }
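The key builders above just join the name components with the \u0001 delimiter; for example (values invented):

    // buildDbKey("HIVE", "Default")               -> "hive\u0001default"
    // buildTableKey("hive", "db1", "Tbl")         -> "hive\u0001db1\u0001tbl"
    // buildTableColKey("hive", "db1", "tbl", "c") -> "hive\u0001db1\u0001tbl\u0001c"
    // Note buildTableColKey does not lower-case its arguments itself, so callers
    // are expected to pass already-normalized names.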
+ + private static String buildKey(String... elements) { + return org.apache.commons.lang.StringUtils.join(elements, delimit); + } + + public static String[] splitDbName(String key) { + String[] names = key.split(delimit); + assert names.length == 2; + return names; + } + /** * Builds a key for the partitions column cache which is concatenation of partition values, each * value separated by a delimiter and the column name * - * @param list of partition values - * @param column name - * @return cache key for partitions column stats cache */ public static String buildPartitonColStatsCacheKey(List partVals, String colName) { return buildPartitionCacheKey(partVals) + delimit + colName; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 1d072ad287..c47856de87 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hive.metastore.cache; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import java.io.Closeable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -35,6 +33,8 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -51,9 +51,11 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -61,6 +63,7 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -95,12 +98,16 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import 
org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -117,6 +124,10 @@ import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + // TODO filter->expr // TODO functionCache // TODO constraintCache @@ -187,9 +198,6 @@ private void setConfInternal(Configuration conf) { /** * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches - * - * @param rawStore - * @throws Exception */ static void prewarm(RawStore rawStore) { if (isCachePrewarmed.get()) { @@ -200,46 +208,68 @@ static void prewarm(RawStore rawStore) { while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); - List dbNames; + Collection catalogsToCache; try { - dbNames = rawStore.getAllDatabases(); - } catch (MetaException e) { - // Try again + catalogsToCache = catalogsToCache(rawStore); + LOG.info("Going to cache catalogs: " + + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", ")); + List catalogs = new ArrayList<>(catalogsToCache.size()); + for (String catName : catalogsToCache) catalogs.add(rawStore.getCatalog(catName)); + sharedCache.populateCatalogsInCache(catalogs); + } catch (MetaException|NoSuchObjectException e) { + LOG.warn("Failed to populate catalogs in cache, going to try again", e); + // try again continue; } - LOG.info("Number of databases to prewarm: {}", dbNames.size()); - List databases = new ArrayList<>(dbNames.size()); - for (String dbName : dbNames) { + LOG.info("Finished prewarming catalogs, starting on databases"); + List databases = new ArrayList<>(); + for (String catName : catalogsToCache) { try { - databases.add(rawStore.getDatabase(dbName)); - } catch (NoSuchObjectException e) { - // Continue with next database - continue; + List dbNames = rawStore.getAllDatabases(catName); + LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size()); + for (String dbName : dbNames) { + try { + databases.add(rawStore.getDatabase(catName, dbName)); + } catch (NoSuchObjectException e) { + // Continue with next database + LOG.warn("Failed to cache database " + + Warehouse.getCatalogQualifiedDbName(catName, dbName) + ", moving on", e); + } + } + } catch (MetaException e) { + LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e); } } sharedCache.populateDatabasesInCache(databases);
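prewarm() above derives its catalog set from catalogsToCache(), defined further down, which treats an unset or empty ConfVars.CATALOGS_TO_CACHE as "cache every catalog"; sketched outcomes (property values invented):

    // CATALOGS_TO_CACHE unset / empty / ""  -> rs.getCatalogs(): prewarm every catalog
    // CATALOGS_TO_CACHE = "hive"            -> prewarm only the default catalog
    // CATALOGS_TO_CACHE = "hive,spark"      -> prewarm exactly the two named catalogs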
LOG.debug("Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache"); int numberOfDatabasesCachedSoFar = 0; - for (String dbName : dbNames) { - dbName = StringUtils.normalizeIdentifier(dbName); + for (Database db : databases) { + String catName = StringUtils.normalizeIdentifier(db.getCatalogName()); + String dbName = StringUtils.normalizeIdentifier(db.getName()); List tblNames; try { - tblNames = rawStore.getAllTables(dbName); + tblNames = rawStore.getAllTables(catName, dbName); } catch (MetaException e) { + LOG.warn("Failed to cache tables for database " + + Warehouse.getCatalogQualifiedDbName(catName, dbName) + ", moving on"); // Continue with next database continue; } int numberOfTablesCachedSoFar = 0; for (String tblName : tblNames) { tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { continue; + } Table table; try { - table = rawStore.getTable(dbName, tblName); + table = rawStore.getTable(catName, dbName, tblName); } catch (MetaException e) { + LOG.warn("Failed to cache table " + + Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName) + + ", moving on"); // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; @@ -253,7 +283,7 @@ static void prewarm(RawStore rawStore) { AggrStats aggrStatsAllButDefaultPartition = null; if (table.isSetPartitionKeys()) { Deadline.startTimer("getPartitions"); - partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); + partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { @@ -263,13 +293,13 @@ static void prewarm(RawStore rawStore) { // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); partitionColStats = - rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Get aggregate stats for all partitions of a table and for all but default // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate // stats again @@ -286,12 +316,12 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); } } else { Deadline.startTimer("getTableColumnStatistics"); - tableColStats = rawStore.getTableColumnStatistics(dbName, tblName, colNames); + tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); } sharedCache.populateTableInCache(table, tableColStats, partitions, partitionColStats, @@ -304,7 +334,7 @@ static void prewarm(RawStore rawStore) { tblName, ++numberOfTablesCachedSoFar, tblNames.size()); } LOG.debug("Processed database: {}. 
Cached {} / {} databases so far.", dbName, - ++numberOfDatabasesCachedSoFar, dbNames.size()); + ++numberOfDatabasesCachedSoFar, databases.size()); } isCachePrewarmed.set(true); } @@ -328,6 +358,17 @@ private static void initBlackListWhiteList(Configuration conf) { } } + private static Collection catalogsToCache(RawStore rs) throws MetaException { + Collection confValue = + MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); + if (confValue == null || confValue.isEmpty() || + (confValue.size() == 1 && confValue.contains(""))) { + return rs.getCatalogs(); + } else { + return confValue; + } + } + @VisibleForTesting /** * This starts a background thread, which initially populates the SharedCache and later @@ -426,85 +467,91 @@ public void run() { void update() { Deadline.registerIfNot(1000000); LOG.debug("CachedStore: updating cached objects"); - List dbNames; try { - dbNames = rawStore.getAllDatabases(); - } catch (MetaException e) { - LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); - return; - } - // Update the database in cache - updateDatabases(rawStore, dbNames); - for (String dbName : dbNames) { - // Update the tables in cache - updateTables(rawStore, dbName); - List tblNames; - try { - tblNames = rawStore.getAllTables(dbName); - } catch (MetaException e) { - // Continue with next database - continue; - } - for (String tblName : tblNames) { - if (!shouldCacheTable(dbName, tblName)) { - continue; + for (String catName : catalogsToCache(rawStore)) { + List dbNames = rawStore.getAllDatabases(catName); + // Update the database in cache + updateDatabases(rawStore, catName, dbNames); + for (String dbName : dbNames) { + // Update the tables in cache + updateTables(rawStore, catName, dbName); + List tblNames; + try { + tblNames = rawStore.getAllTables(catName, dbName); + } catch (MetaException e) { + // Continue with next database + continue; + } + for (String tblName : tblNames) { + if (!shouldCacheTable(catName, dbName, tblName)) { + continue; + } + // Update the table column stats for a table in cache + updateTableColStats(rawStore, catName, dbName, tblName); + // Update the partitions for a table in cache + updateTablePartitions(rawStore, catName, dbName, tblName); + // Update the partition col stats for a table in cache + updateTablePartitionColStats(rawStore, catName, dbName, tblName); + // Update aggregate partition column stats for a table in cache + updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName); + } } - // Update the table column stats for a table in cache - updateTableColStats(rawStore, dbName, tblName); - // Update the partitions for a table in cache - updateTablePartitions(rawStore, dbName, tblName); - // Update the partition col stats for a table in cache - updateTablePartitionColStats(rawStore, dbName, tblName); - // Update aggregate partition column stats for a table in cache - updateTableAggregatePartitionColStats(rawStore, dbName, tblName); - } } sharedCache.incrementUpdateCount(); + } catch (MetaException e) { + LOG.error("Updating CachedStore: error happened during refresh; skipping this iteration", e); + } } - private void updateDatabases(RawStore rawStore, List dbNames) { - List databases = new ArrayList<>(dbNames.size()); + + private void updateDatabases(RawStore rawStore, String catName, List dbNames) { + // Prepare the list of databases + List databases = new ArrayList<>(); for (String dbName : dbNames) { Database db; try { - db = rawStore.getDatabase(dbName); + db = 
rawStore.getDatabase(catName, dbName); databases.add(db); } catch (NoSuchObjectException e) { - LOG.info("Updating CachedStore: database - " + dbName + " does not exist.", e); + LOG.info("Updating CachedStore: database - " + catName + "." + dbName + + " does not exist.", e); } } sharedCache.refreshDatabasesInCache(databases); } - private void updateTables(RawStore rawStore, String dbName) { + private void updateTables(RawStore rawStore, String catName, String dbName) { List
tables = new ArrayList<>(); try { - List tblNames = rawStore.getAllTables(dbName); + List tblNames = rawStore.getAllTables(catName, dbName); for (String tblName : tblNames) { - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { continue; } - Table table = rawStore.getTable(StringUtils.normalizeIdentifier(dbName), + Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName)); tables.add(table); } - sharedCache.refreshTablesInCache(dbName, tables); + sharedCache.refreshTablesInCache(catName, dbName, tables); } catch (MetaException e) { LOG.debug("Unable to refresh cached tables for database: " + dbName, e); } } - private void updateTableColStats(RawStore rawStore, String dbName, String tblName) { + + private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); + Table table = rawStore.getTable(catName, dbName, tblName); if (!table.isSetPartitionKeys()) { List colNames = MetaStoreUtils.getColumnNamesForTable(table); Deadline.startTimer("getTableColumnStatistics"); ColumnStatistics tableColStats = - rawStore.getTableColumnStatistics(dbName, tblName, colNames); + rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); if (tableColStats != null) { - sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } @@ -513,29 +560,30 @@ private void updateTableColStats(RawStore rawStore, String dbName, String tblNam } } - private void updateTablePartitions(RawStore rawStore, String dbName, String tblName) { + private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) { try { Deadline.startTimer("getPartitions"); - List partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); + List partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); - sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), partitions); } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); } } - private void updateTablePartitionColStats(RawStore rawStore, String dbName, String tblName) { + private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); + Table table = rawStore.getTable(catName, dbName, tblName); List colNames = MetaStoreUtils.getColumnNamesForTable(table); - List partNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); List partitionColStats = - rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - 
sharedCache.refreshPartitionColStatsInCache(dbName, tblName, partitionColStats); + sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); } @@ -543,16 +591,16 @@ private void updateTablePartitionColStats(RawStore rawStore, String dbName, Stri // Update cached aggregate stats for all partitions of a table and for all // but default partition - private void updateTableAggregatePartitionColStats(RawStore rawStore, String dbName, - String tblName) { + private void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName, + String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); - List partNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + Table table = rawStore.getTable(catName, dbName, tblName); + List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); List colNames = MetaStoreUtils.getColumnNamesForTable(table); if ((partNames != null) && (partNames.size() > 0)) { Deadline.startTimer("getAggregareStatsForAllPartitions"); AggrStats aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate stats again List partKeys = table.getPartitionKeys(); @@ -568,9 +616,10 @@ private void updateTableAggregatePartitionColStats(RawStore rawStore, String dbN partNames.remove(defaultPartitionName); Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault"); AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } @@ -612,19 +661,59 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + rawStore.createCatalog(cat); + sharedCache.addCatalogToCache(cat); + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + rawStore.alterCatalog(catName, cat); + sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat); + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + if (!sharedCache.isCatalogCachePrewarmed()) { + return rawStore.getCatalog(catalogName); + } + Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName)); + if (cat == null) { + throw new NoSuchObjectException(); + } + return cat; + } + + @Override + public List getCatalogs() throws MetaException { + if (!sharedCache.isCatalogCachePrewarmed()) { + return rawStore.getCatalogs(); + } + return sharedCache.listCachedCatalogs(); + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + rawStore.dropCatalog(catalogName); + catalogName = catalogName.toLowerCase(); + sharedCache.removeCatalogFromCache(catalogName); + } + + @Override public void 
createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); sharedCache.addDatabaseToCache(db); } @Override - public Database getDatabase(String dbName) throws NoSuchObjectException { + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabase(dbName); + return rawStore.getDatabase(catName, dbName); } dbName = dbName.toLowerCase(); - Database db = - sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); + Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); if (db == null) { throw new NoSuchObjectException(); } @@ -632,40 +721,40 @@ public Database getDatabase(String dbName) throws NoSuchObjectException { } @Override - public boolean dropDatabase(String dbName) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.dropDatabase(dbName); + public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { + boolean succ = rawStore.dropDatabase(catName, dbName); if (succ) { - dbName = dbName.toLowerCase(); - sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); + sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); } return succ; } @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.alterDatabase(dbName, db); + boolean succ = rawStore.alterDatabase(catName, dbName, db); if (succ) { - dbName = dbName.toLowerCase(); - sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(dbName), db); + sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), db); } return succ; } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabases(pattern); + return rawStore.getDatabases(catName, pattern); } - return sharedCache.listCachedDatabases(pattern); + return sharedCache.listCachedDatabases(catName, pattern); } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getAllDatabases(); + return rawStore.getAllDatabases(catName); } - return sharedCache.listCachedDatabases(); + return sharedCache.listCachedDatabases(catName); } @Override @@ -704,41 +793,44 @@ private void validateTableType(Table tbl) { @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { rawStore.createTable(tbl); - String dbName = StringUtils.normalizeIdentifier(tbl.getDbName()); - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String catName = normalizeIdentifier(tbl.getCatName()); + String dbName = normalizeIdentifier(tbl.getDbName()); + String tblName = normalizeIdentifier(tbl.getTableName()); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } validateTableType(tbl); - sharedCache.addTableToCache(dbName, tblName, tbl); + sharedCache.addTableToCache(catName, dbName, tblName, tbl); } 
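The catalog and database overrides above establish the pattern the rest of this patch repeats for tables and partitions: mutate the backing RawStore first, mirror the result into the SharedCache under normalized names, and fall back to the RawStore until the cache is prewarmed. From a caller's perspective (a sketch, not part of the patch; names invented, setters per the new Thrift Catalog definition):

    import org.apache.hadoop.hive.metastore.api.Catalog;

    Catalog cat = new Catalog();
    cat.setName("test_cat");
    cat.setLocationUri("/cats/test_cat");
    cachedStore.createCatalog(cat);      // persisted via rawStore, then cached
    cachedStore.getCatalog("test_cat");  // served from SharedCache once prewarmed
    cachedStore.dropCatalog("test_cat"); // removed from both store and cache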
@Override - public boolean dropTable(String dbName, String tblName) + public boolean dropTable(String catName, String dbName, String tblName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropTable(dbName, tblName); + boolean succ = rawStore.dropTable(catName, dbName, tblName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removeTableFromCache(dbName, tblName); + sharedCache.removeTableFromCache(catName, dbName, tblName); } return succ; } @Override - public Table getTable(String dbName, String tblName) throws MetaException { + public Table getTable(String catName, String dbName, String tblName) throws MetaException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getTable(dbName, tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getTable(catName, dbName, tblName); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // This table is not yet loaded in cache - return rawStore.getTable(dbName, tblName); + return rawStore.getTable(catName, dbName, tblName); } if (tbl != null) { tbl.unsetPrivileges(); @@ -751,220 +843,232 @@ public Table getTable(String dbName, String tblName) throws MetaException { public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { boolean succ = rawStore.addPartition(part); if (succ) { - String dbName = StringUtils.normalizeIdentifier(part.getDbName()); - String tblName = StringUtils.normalizeIdentifier(part.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String dbName = normalizeIdentifier(part.getDbName()); + String tblName = normalizeIdentifier(part.getTableName()); + String catName = part.isSetCatName() ? 
normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME; + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionToCache(dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part); } return succ; } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(dbName, tblName, parts); + boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionsToCache(dbName, tblName, parts); + sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); } return succ; } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists); + boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part); } } return succ; } @Override - public Partition getPartition(String dbName, String tblName, List part_vals) + public Partition getPartition(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartition(dbName, tblName, part_vals); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartition(catName, dbName, tblName, part_vals); } - Partition part = sharedCache.getPartitionFromCache(dbName, tblName, part_vals); + Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals); if (part == null) { // The table containing the partition is not yet loaded in cache - return rawStore.getPartition(dbName, tblName, part_vals); + return rawStore.getPartition(catName, dbName, tblName, part_vals); } return part; } @Override - public boolean doesPartitionExist(String dbName, String tblName, + public boolean doesPartitionExist(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = 
StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.doesPartitionExist(dbName, tblName, part_vals); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.doesPartitionExist(catName, dbName, tblName, part_vals); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table containing the partition is not yet loaded in cache - return rawStore.doesPartitionExist(dbName, tblName, part_vals); + return rawStore.doesPartitionExist(catName, dbName, tblName, part_vals); } - return sharedCache.existPartitionFromCache(dbName, tblName, part_vals); + return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals); } @Override - public boolean dropPartition(String dbName, String tblName, List part_vals) + public boolean dropPartition(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropPartition(dbName, tblName, part_vals); + boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removePartitionFromCache(dbName, tblName, part_vals); + sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals); } return succ; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - rawStore.dropPartitions(dbName, tblName, partNames); + rawStore.dropPartitions(catName, dbName, tblName, partNames); + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - List> partVals = new ArrayList>(); + List> partVals = new ArrayList<>(); for (String partName : partNames) { partVals.add(partNameToVals(partName)); } - sharedCache.removePartitionsFromCache(dbName, tblName, partVals); + sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals); } @Override - public List getPartitions(String dbName, String tblName, int max) + public List getPartitions(String catName, String dbName, String tblName, int max) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitions(dbName, tblName, max); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitions(catName, dbName, tblName, max); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table containing the partitions is not yet loaded in cache - return rawStore.getPartitions(dbName, tblName, max); + return 
rawStore.getPartitions(catName, dbName, tblName, max); } - List parts = sharedCache.listCachedPartitions(dbName, tblName, max); + List parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max); return parts; } @Override - public void alterTable(String dbName, String tblName, Table newTable) + public void alterTable(String catName, String dbName, String tblName, Table newTable) throws InvalidObjectException, MetaException { - rawStore.alterTable(dbName, tblName, newTable); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); - if (!shouldCacheTable(dbName, tblName) && !shouldCacheTable(dbName, newTblName)) { + rawStore.alterTable(catName, dbName, tblName, newTable); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + String newTblName = normalizeIdentifier(newTable.getTableName()); + if (!shouldCacheTable(catName, dbName, tblName) && + !shouldCacheTable(catName, dbName, newTblName)) { return; } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table is not yet loaded in cache return; } - if (shouldCacheTable(dbName, tblName) && shouldCacheTable(dbName, newTblName)) { + if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { // If old table is in the cache and the new table can also be cached - sharedCache.alterTableInCache(dbName, tblName, newTable); - } else if (!shouldCacheTable(dbName, tblName) && shouldCacheTable(dbName, newTblName)) { + sharedCache.alterTableInCache(catName, dbName, tblName, newTable); + } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { // If old table is *not* in the cache but the new table can be cached - sharedCache.addTableToCache(dbName, newTblName, newTable); - } else if (shouldCacheTable(dbName, tblName) && !shouldCacheTable(dbName, newTblName)) { + sharedCache.addTableToCache(catName, dbName, newTblName, newTable); + } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) { // If old table is in the cache but the new table *cannot* be cached - sharedCache.removeTableFromCache(dbName, tblName); + sharedCache.removeTableFromCache(catName, dbName, tblName); } } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { - rawStore.updateCreationMetadata(dbname, tablename, cm); + rawStore.updateCreationMetadata(catName, dbname, tablename, cm); } @Override - public List getTables(String dbName, String pattern) throws MetaException { + public List getTables(String catName, String dbName, String pattern) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTables(dbName, pattern); + return rawStore.getTables(catName, dbName, pattern); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), pattern, - (short) -1); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), pattern, (short) -1); } @Override - public List getTables(String dbName, String pattern, TableType tableType) 
+ public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTables(dbName, pattern, tableType); + return rawStore.getTables(catName, dbName, pattern, tableType); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), pattern, - tableType); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), pattern, tableType); } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { - return rawStore.getMaterializedViewsForRewriting(dbName); + return rawStore.getMaterializedViewsForRewriting(catName, dbName); } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) - throws MetaException { + public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException { // TODO Check if all required tables are allowed, if so, get it from cache if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTableMeta(dbNames, tableNames, tableTypes); + return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } - return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(dbNames), + return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbNames), StringUtils.normalizeIdentifier(tableNames), tableTypes); } @Override - public List
<Table> getTableObjectsByName(String dbName, List<String> tblNames) + public List<Table>
getTableObjectsByName(String catName, String dbName, List<String> tblNames) throws MetaException, UnknownDBException { - dbName = StringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean missSomeInCache = false; for (String tblName : tblNames) { - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { missSomeInCache = true; break; } } if (!isCachePrewarmed.get() || missSomeInCache) { - return rawStore.getTableObjectsByName(dbName, tblNames); + return rawStore.getTableObjectsByName(catName, dbName, tblNames); }
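
The loop that follows is the cache-aside pattern this wrapper uses throughout: serve each table from the prewarmed SharedCache and fall back to the backing RawStore on a per-table miss. A minimal sketch of that pattern, assuming the surrounding sharedCache/rawStore fields; it is an illustration, not the exact CachedStore code:

    // Cache-aside lookup: fast path from the in-memory cache, slow path from the RawStore.
    private Table lookup(String catName, String dbName, String tblName) throws MetaException {
      Table t = sharedCache.getTableFromCache(catName, dbName, tblName);
      if (t == null) {
        t = rawStore.getTable(catName, dbName, tblName); // authoritative store
      }
      return t;
    }

List<Table>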
tables = new ArrayList<>(); for (String tblName : tblNames) { - tblName = StringUtils.normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + tblName = normalizeIdentifier(tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { - tbl = rawStore.getTable(dbName, tblName); + tbl = rawStore.getTable(catName, dbName, tblName); } tables.add(tbl); } @@ -972,39 +1076,42 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta } @Override - public List getAllTables(String dbName) throws MetaException { + public List getAllTables(String catName, String dbName) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getAllTables(dbName); + return rawStore.getAllTables(catName, dbName); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName)); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); } @Override - public List listTableNamesByFilter(String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.listTableNamesByFilter(dbName, filter, max_tables); + return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), filter, - max_tables); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), filter, max_tables); } @Override - public List listPartitionNames(String dbName, String tblName, + public List listPartitionNames(String catName, String dbName, String tblName, short max_parts) throws MetaException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionNames(dbName, tblName, max_parts); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNames(dbName, tblName, max_parts); + return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } List partitionNames = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, max_parts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) { if (max_parts == -1 || count < max_parts) { partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); } @@ -1013,48 +1120,45 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { throw new UnsupportedOperationException(); } @Override - 
public List listPartitionNamesByFilter(String dbName, - String tblName, String filter, short max_parts) throws MetaException { - // TODO Translate filter -> expr - return rawStore.listPartitionNamesByFilter(dbName, tblName, filter, max_parts); - } - - @Override - public void alterPartition(String dbName, String tblName, List partVals, Partition newPart) - throws InvalidObjectException, MetaException { - rawStore.alterPartition(dbName, tblName, partVals, newPart); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + public void alterPartition(String catName, String dbName, String tblName, List partVals, + Partition newPart) throws InvalidObjectException, MetaException { + rawStore.alterPartition(catName, dbName, tblName, partVals, newPart); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - sharedCache.alterPartitionInCache(dbName, tblName, partVals, newPart); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart); } @Override - public void alterPartitions(String dbName, String tblName, List> partValsList, - List newParts) throws InvalidObjectException, MetaException { - rawStore.alterPartitions(dbName, tblName, partValsList, newParts); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + public void alterPartitions(String catName, String dbName, String tblName, + List> partValsList, List newParts) + throws InvalidObjectException, MetaException { + rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - sharedCache.alterPartitionsInCache(dbName, tblName, partValsList, newParts); + sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); } private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result, SharedCache sharedCache) throws MetaException, NoSuchObjectException { List parts = - sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getDbName()), + sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()), + StringUtils.normalizeIdentifier(table.getDbName()), StringUtils.normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); @@ -1067,26 +1171,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionsByFilter(dbName, tblName, filter, maxParts); + return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { + 
catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsByExpr(dbName, tblName, expr, defaultPartitionName, maxParts, + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionsByExpr(dbName, tblName, expr, defaultPartitionName, maxParts, + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr, @@ -1095,25 +1200,26 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { - return rawStore.getNumPartitionsByFilter(dbName, tblName, filter); + return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); @@ -1133,21 +1239,22 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsByNames(dbName, tblName, partNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is 
not yet loaded in cache - return rawStore.getPartitionsByNames(dbName, tblName, partNames); + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } List partitions = new ArrayList<>(); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(dbName, tblName, partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); if (part!=null) { partitions.add(part); } @@ -1156,19 +1263,19 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) } @Override - public Table markPartitionForEvent(String dbName, String tblName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.markPartitionForEvent(dbName, tblName, partVals, evtType); + return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); } @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, + public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType); + return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); } @Override @@ -1205,31 +1312,31 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getDBPrivilegeSet(dbName, userName, groupNames); + return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames); + return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getPartitionPrivilegeSet(dbName, tableName, partition, userName, groupNames); + return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getColumnPrivilegeSet(dbName, tableName, partitionName, columnName, userName, groupNames); + return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); } @Override @@ -1240,36 +1347,36 @@ public 
PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { - return rawStore.listPrincipalDBGrants(principalName, principalType, dbName); + PrincipalType principalType, String catName, String dbName) { + return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { - return rawStore.listAllTableGrants(principalName, principalType, dbName, tableName); + PrincipalType principalType, String catName, String dbName, String tableName) { + return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName); } @Override public List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { - return rawStore.listPrincipalPartitionGrants(principalName, principalType, dbName, tableName, partValues, partName); + return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName); } @Override public List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { - return rawStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName); + return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); } @Override public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName, String columnName) { - return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, dbName, tableName, partValues, partName, columnName); + return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName); } @Override @@ -1312,23 +1419,24 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, 
userName, groupNames); } - Partition p = sharedCache.getPartitionFromCache(dbName, tblName, partVals); + Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, + PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); p.setPrivileges(privs); } @@ -1336,25 +1444,26 @@ public Partition getPartitionWithAuth(String dbName, String tblName, } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, + PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); @@ -1365,22 +1474,23 @@ public Partition getPartitionWithAuth(String dbName, String tblName, } @Override - public List listPartitionNamesPs(String dbName, String tblName, + public List listPartitionNamesPs(String catName, String dbName, String tblName, List partVals, short maxParts) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } List partNames = new ArrayList<>(); int count = 0; - for (Partition part : 
sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { boolean psMatch = true; for (int i=0;i listPartitionsPsWithAuth(String dbName, String tblName, + public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partVals, short maxParts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, userName, + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, userName, + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { boolean psMatch = true; for (int i = 0; i < partVals.size(); i++) { String psVal = partVals.get(i); @@ -1435,7 +1546,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = - getPartitionPrivilegeSet(dbName, tblName, partName, userName, groupNames); + getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); } @@ -1448,12 +1559,15 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = rawStore.updateTableColumnStatistics(colStats); if (succ) { - String dbName = StringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()); - String tblName = StringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String catName = colStats.getStatsDesc().isSetCatName() ? 
+ normalizeIdentifier(colStats.getStatsDesc().getCatName()) : + getDefaultCatalog(conf); + String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); + String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return succ; @@ -1464,42 +1578,45 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) colNames.add(statsObj.getColName()); } StatsSetupConst.setColumnStatsState(table.getParameters(), colNames); - sharedCache.alterTableInCache(dbName, tblName, table); - sharedCache.updateTableColStatsInCache(dbName, tblName, statsObjs); + sharedCache.alterTableInCache(catName, dbName, tblName, table); + sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs); } return succ; } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tblName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getTableColumnStatistics(dbName, tblName, colNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics(dbName, tblName, colNames); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); List colStatObjs = - sharedCache.getTableColStatsFromCache(dbName, tblName, colNames); + sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames); return new ColumnStatistics(csd, colStatObjs); } @Override - public boolean deleteTableColumnStatistics(String dbName, String tblName, String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deleteTableColumnStatistics(dbName, tblName, colName); + boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removeTableColStatsFromCache(dbName, tblName, colName); + sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName); } return succ; } @@ -1509,65 +1626,69 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List statsObjs = colStats.getStatsObj(); - Partition part = getPartition(dbName, tblName, partVals); + Partition part = getPartition(catName, 
dbName, tblName, partVals); List colNames = new ArrayList<>(); for (ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); } StatsSetupConst.setColumnStatsState(part.getParameters(), colNames); - sharedCache.alterPartitionInCache(dbName, tblName, partVals, part); - sharedCache.updatePartitionColStatsInCache(dbName, tblName, partVals, colStats.getStatsObj()); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part); + sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj()); } return succ; }
@Override // TODO: calculate from cached values. - public List getPartitionColumnStatistics(String dbName, String tblName, + public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); }
@Override - public boolean deletePartitionColumnStatistics(String dbName, String tblName, String partName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = - rawStore.deletePartitionColumnStatistics(dbName, tblName, partName, partVals, colName); + rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removePartitionColStatsFromCache(dbName, tblName, partVals, colName); + sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName); } return succ; }
@Override - public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { List colStats; + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); } - List allPartNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); if (partNames.size() == allPartNames.size()) {
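
When the whole-table aggregate is requested, the cache can answer without touching per-partition stats: StatsType.ALL covers a request for every partition, StatsType.ALLBUTDEFAULT covers every partition except the default one, and anything else falls through to mergeColStatsForPartitions below. A sketch of that dispatch, assuming the partNames/allPartNames/defaultPartitionName variables of this hunk; the helper itself is illustrative, not part of the commit:

    // Pick a precomputed aggregate if the request shape allows it; null means "merge manually".
    private StatsType pickPrecomputedAggregate(List<String> partNames, List<String> allPartNames,
        String defaultPartitionName) {
      if (partNames.size() == allPartNames.size()) {
        return StatsType.ALL;            // stats over every partition
      }
      if (!partNames.contains(defaultPartitionName)
          && partNames.size() == allPartNames.size() - 1) {
        return StatsType.ALLBUTDEFAULT;  // every partition except the default one
      }
      return null;                       // arbitrary subset: merge per-partition stats
    }

- colStats = sharedCache.getAggrStatsFromCache(dbName, tblName, colNames, StatsType.ALL); + colStats = sharedCache.getAggrStatsFromCache(catName, dbName,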
tblName, colNames, StatsType.ALL); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1575,7 +1696,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); if (!partNames.contains(defaultPartitionName)) { colStats = - sharedCache.getAggrStatsFromCache(dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); + sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1584,30 +1705,29 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", tblName, partNames, colNames); MergedColumnStatsForPartitions mergedColStats = - mergeColStatsForPartitions(dbName, tblName, partNames, colNames, sharedCache); + mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache); return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } - private MergedColumnStatsForPartitions mergeColStatsForPartitions(String dbName, String tblName, - List partNames, List colNames, SharedCache sharedCache) throws MetaException { + private MergedColumnStatsForPartitions mergeColStatsForPartitions( + String catName, String dbName, String tblName, List partNames, List colNames, + SharedCache sharedCache) throws MetaException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); - Map> colStatsMap = - new HashMap>(); + Map> colStatsMap = new HashMap<>(); boolean areAllPartsFound = true; long partsFound = 0; for (String colName : colNames) { long partsFoundForColumn = 0; ColumnStatsAggregator colStatsAggregator = null; - List colStatsWithPartInfoList = - new ArrayList(); + List colStatsWithPartInfoList = new ArrayList<>(); for (String partName : partNames) { ColumnStatisticsObj colStatsForPart = - sharedCache.getPartitionColStatsFromCache(dbName, tblName, partNameToVals(partName), colName); + sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName); if (colStatsForPart != null) { ColStatsObjWithSourceInfo colStatsWithPartInfo = - new ColStatsObjWithSourceInfo(colStatsForPart, dbName, tblName, partName); + new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName); colStatsWithPartInfoList.add(colStatsWithPartInfo); if (colStatsAggregator == null) { colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator( @@ -1755,32 +1875,32 @@ public void setMetaStoreSchemaVersion(String version, String comment) } @Override - public List listDBGrantsAll(String dbName) { - return rawStore.listDBGrantsAll(dbName); + public List listDBGrantsAll(String catName, String dbName) { + return rawStore.listDBGrantsAll(catName, dbName); } @Override - public List listPartitionColumnGrantsAll(String dbName, + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { - return rawStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName); + return rawStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); } @Override - public List listTableGrantsAll(String dbName, + public 
List listTableGrantsAll(String catName, String dbName, String tableName) { - return rawStore.listTableGrantsAll(dbName, tableName); + return rawStore.listTableGrantsAll(catName, dbName, tableName); } @Override - public List listPartitionGrantsAll(String dbName, + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { - return rawStore.listPartitionGrantsAll(dbName, tableName, partitionName); + return rawStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); } @Override - public List listTableColumnGrantsAll(String dbName, + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { - return rawStore.listTableColumnGrantsAll(dbName, tableName, columnName); + return rawStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } @Override @@ -1791,37 +1911,37 @@ public void createFunction(Function func) } @Override - public void alterFunction(String dbName, String funcName, + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { // TODO functionCache - rawStore.alterFunction(dbName, funcName, newFunction); + rawStore.alterFunction(catName, dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) throws MetaException, + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { // TODO functionCache - rawStore.dropFunction(dbName, funcName); + rawStore.dropFunction(catName, dbName, funcName); } @Override - public Function getFunction(String dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { // TODO functionCache - return rawStore.getFunction(dbName, funcName); + return rawStore.getFunction(catName, dbName, funcName); } @Override - public List getAllFunctions() throws MetaException { + public List getAllFunctions(String catName) throws MetaException { // TODO functionCache - return rawStore.getAllFunctions(); + return rawStore.getAllFunctions(catName); } @Override - public List getFunctions(String dbName, String pattern) + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { // TODO functionCache - return rawStore.getFunctions(dbName, pattern); + return rawStore.getFunctions(catName, dbName, pattern); } @Override @@ -1899,46 +2019,46 @@ public int getDatabaseCount() throws MetaException { } @Override - public List getPrimaryKeys(String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getPrimaryKeys(db_name, tbl_name); + return rawStore.getPrimaryKeys(catName, db_name, tbl_name); } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getForeignKeys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + return rawStore.getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); }
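
Every one of these delegating methods now takes the catalog as its first argument; until function and constraint caches exist (the TODOs above), they remain straight pass-throughs to the RawStore. Pre-catalog callers migrate by passing the default catalog explicitly; a hedged usage sketch in which store and the object names are purely illustrative:

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    // Before this change: store.getPrimaryKeys("default", "web_logs");
    // After: the catalog comes first, with DEFAULT_CATALOG_NAME for legacy metadata.
    List<SQLPrimaryKey> pks =
        store.getPrimaryKeys(DEFAULT_CATALOG_NAME, "default", "web_logs");

@Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String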
tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getUniqueConstraints(db_name, tbl_name); + return rawStore.getUniqueConstraints(catName, db_name, tbl_name); } @Override - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getNotNullConstraints(db_name, tbl_name); + return rawStore.getNotNullConstraints(catName, db_name, tbl_name); } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getDefaultConstraints(db_name, tbl_name); + return rawStore.getDefaultConstraints(catName, db_name, tbl_name); } @Override - public List getCheckConstraints(String db_name, String tbl_name) + public List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getCheckConstraints(db_name, tbl_name); + return rawStore.getCheckConstraints(catName, db_name, tbl_name); } @Override @@ -1950,21 +2070,24 @@ public int getDatabaseCount() throws MetaException { // TODO constraintCache List constraintNames = rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); - String dbName = StringUtils.normalizeIdentifier(tbl.getDbName()); - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String dbName = normalizeIdentifier(tbl.getDbName()); + String tblName = normalizeIdentifier(tbl.getTableName()); + String catName = tbl.isSetCatName() ? normalizeIdentifier(tbl.getCatName()) : + DEFAULT_CATALOG_NAME; + if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } - sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()), + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), + StringUtils.normalizeIdentifier(tbl.getDbName()), StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); return constraintNames; } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO constraintCache - rawStore.dropConstraint(dbName, tableName, constraintName); + rawStore.dropConstraint(catName, dbName, tableName, constraintName, missingOk); } @Override @@ -2015,6 +2138,11 @@ public void createISchema(ISchema schema) rawStore.createISchema(schema); } + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + return rawStore.getPartitionColStatsForDatabase(catName, dbName); + } + @Override public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException { @@ -2209,8 +2337,8 @@ public long getCacheUpdateCount() { return sharedCache.getUpdateCount(); } - static boolean isNotInBlackList(String dbName, String tblName) { - String str = dbName + "." 
+ tblName; + static boolean isNotInBlackList(String catName, String dbName, String tblName) { + String str = Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName); for (Pattern pattern : blacklistPatterns) { LOG.debug("Trying to match: {} against blacklist pattern: {}", str, pattern); Matcher matcher = pattern.matcher(str); @@ -2223,8 +2351,8 @@ static boolean isNotInBlackList(String dbName, String tblName) { return true; } - static boolean isInWhitelist(String dbName, String tblName) { - String str = dbName + "." + tblName; + private static boolean isInWhitelist(String catName, String dbName, String tblName) { + String str = Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName); for (Pattern pattern : whitelistPatterns) { LOG.debug("Trying to match: {} against whitelist pattern: {}", str, pattern); Matcher matcher = pattern.matcher(str); @@ -2249,12 +2377,12 @@ static void setBlacklistPattern(List patterns) { // Determines if we should cache a table (& its partitions, stats etc), // based on whitelist/blacklist - static boolean shouldCacheTable(String dbName, String tblName) { - if (!isNotInBlackList(dbName, tblName)) { + static boolean shouldCacheTable(String catName, String dbName, String tblName) { + if (!isNotInBlackList(catName, dbName, tblName)) { LOG.debug("{}.{} is in blacklist, skipping", dbName, tblName); return false; } - if (!isInWhitelist(dbName, tblName)) { + if (!isInWhitelist(catName, dbName, tblName)) { LOG.debug("{}.{} is not in whitelist, skipping", dbName, tblName); return false; } @@ -2275,4 +2403,10 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) { .equals(".*") && MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST).isEmpty(); } + + @VisibleForTesting + void resetCatalogCache() { + sharedCache.resetCatalogCache(); + setCachePrewarmedState(false); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index cf92eda373..89b400697b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -20,6 +20,7 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -30,11 +31,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.TreeMap; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -50,29 +55,38 @@ import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + public class SharedCache { private static ReentrantReadWriteLock cacheLock = new 
ReentrantReadWriteLock(true); + private boolean isCatalogCachePrewarmed = false; + private Map catalogCache = new TreeMap<>(); + private HashSet catalogsDeletedDuringPrewarm = new HashSet<>(); + private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false); + // For caching Database objects. Key is database name - private Map databaseCache = new ConcurrentHashMap(); + private Map databaseCache = new TreeMap<>(); private boolean isDatabaseCachePrewarmed = false; - private HashSet databasesDeletedDuringPrewarm = new HashSet(); + private HashSet databasesDeletedDuringPrewarm = new HashSet<>(); private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); + // For caching TableWrapper objects. Key is aggregate of database name and table name - private Map tableCache = new ConcurrentHashMap(); + private Map tableCache = new TreeMap<>(); private boolean isTableCachePrewarmed = false; - private HashSet tablesDeletedDuringPrewarm = new HashSet(); + private HashSet tablesDeletedDuringPrewarm = new HashSet<>(); private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); - private Map sdCache = new ConcurrentHashMap<>(); + private Map sdCache = new HashMap<>(); + private Configuration conf; private static MessageDigest md; static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); private AtomicLong cacheUpdateCount = new AtomicLong(0); - static enum StatsType { + enum StatsType { ALL(0), ALLBUTDEFAULT(1); private final int position; - private StatsType(int position) { + StatsType(int position) { this.position = position; } @@ -155,6 +169,10 @@ public void setParameters(Map parameters) { this.parameters = parameters; } + boolean sameDatabase(String catName, String dbName) { + return catName.equals(t.getCatName()) && dbName.equals(t.getDbName()); + } + void cachePartition(Partition part, SharedCache sharedCache) { try { tableLock.writeLock().lock(); @@ -669,12 +687,102 @@ public int getRefCount() { } } - public Database getDatabaseFromCache(String name) { + public void populateCatalogsInCache(Collection catalogs) { + for (Catalog cat : catalogs) { + Catalog catCopy = cat.deepCopy(); + // ObjectStore also stores db name in lowercase + catCopy.setName(catCopy.getName().toLowerCase()); + try { + cacheLock.writeLock().lock(); + // Since we allow write operations on cache while prewarm is happening: + // 1. Don't add databases that were deleted while we were preparing list for prewarm + // 2. 
Skip overwriting existing catalog object + // (which is present because it was added after prewarm started) + if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) { + continue; + } + catalogCache.putIfAbsent(catCopy.getName(), catCopy); + catalogsDeletedDuringPrewarm.clear(); + isCatalogCachePrewarmed = true; + } finally { + cacheLock.writeLock().unlock(); + } + } + } + + public Catalog getCatalogFromCache(String name) { + Catalog cat = null; + try { + cacheLock.readLock().lock(); + if (catalogCache.get(name) != null) { + cat = catalogCache.get(name).deepCopy(); + } + } finally { + cacheLock.readLock().unlock(); + } + return cat; + } + + public void addCatalogToCache(Catalog cat) { + try { + cacheLock.writeLock().lock(); + Catalog catCopy = cat.deepCopy(); + // ObjectStore also stores catalog names in lowercase + catCopy.setName(catCopy.getName().toLowerCase()); + catalogCache.put(catCopy.getName(), catCopy); + isCatalogCacheDirty.set(true); + } finally { + cacheLock.writeLock().unlock(); + } + } + + public void alterCatalogInCache(String catName, Catalog newCat) { + try { + cacheLock.writeLock().lock(); + removeCatalogFromCache(catName); + addCatalogToCache(newCat.deepCopy()); + } finally { + cacheLock.writeLock().unlock(); + } + } + + public void removeCatalogFromCache(String name) { + name = normalizeIdentifier(name); + try { + cacheLock.writeLock().lock(); + // If the catalog cache is not yet prewarmed, add this to a set which the prewarm thread can check + // so that the prewarm thread does not add it back + if (!isCatalogCachePrewarmed) { + catalogsDeletedDuringPrewarm.add(name); + } + if (catalogCache.remove(name) != null) { + isCatalogCacheDirty.set(true); + } + } finally { + cacheLock.writeLock().unlock(); + } + } + + public List listCachedCatalogs() { + try { + cacheLock.readLock().lock(); + return new ArrayList<>(catalogCache.keySet()); + } finally { + cacheLock.readLock().unlock(); + } + } + + public boolean isCatalogCachePrewarmed() { + return isCatalogCachePrewarmed; + } + + public Database getDatabaseFromCache(String catName, String name) { Database db = null; try { cacheLock.readLock().lock(); - if (databaseCache.get(name) != null) { - db = databaseCache.get(name).deepCopy(); + String key = CacheUtils.buildDbKey(catName, name); + if (databaseCache.get(key) != null) { + db = databaseCache.get(key).deepCopy(); } } finally { cacheLock.readLock().unlock(); }
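
Note the locking discipline in getCatalogFromCache and getDatabaseFromCache above: reads take the shared read lock and hand back a deepCopy, so callers can never mutate cached thrift objects in place, while writes take the write lock and copy on the way in. That pattern in isolation, using the fields of this class:

    // Read path: shared lock plus defensive copy of the cached thrift object.
    public Catalog getCatalogFromCache(String name) {
      cacheLock.readLock().lock();
      try {
        Catalog cat = catalogCache.get(name);
        return (cat == null) ? null : cat.deepCopy();
      } finally {
        cacheLock.readLock().unlock();
      }
    }

@@ -693,10 +801,11 @@ public void populateDatabasesInCache(List databases) { // 1. Don't add databases that were deleted while we were preparing list for prewarm // 2.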
Skip overwriting existing db object + // (which is present because it was added after prewarm started) - if (databasesDeletedDuringPrewarm.contains(dbCopy.getName().toLowerCase())) { + String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); + if (databasesDeletedDuringPrewarm.contains(key)) { continue; } - databaseCache.putIfAbsent(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy); + databaseCache.putIfAbsent(key, dbCopy); databasesDeletedDuringPrewarm.clear(); isDatabaseCachePrewarmed = true; } finally { @@ -715,22 +824,24 @@ public void addDatabaseToCache(Database db) { Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); - databaseCache.put(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy); + dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase()); + databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); isDatabaseCacheDirty.set(true); } finally { cacheLock.writeLock().unlock(); } } - public void removeDatabaseFromCache(String dbName) { + public void removeDatabaseFromCache(String catName, String dbName) { try { cacheLock.writeLock().lock(); // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back + String key = CacheUtils.buildDbKey(catName, dbName); if (!isDatabaseCachePrewarmed) { - databasesDeletedDuringPrewarm.add(dbName.toLowerCase()); + databasesDeletedDuringPrewarm.add(key); } - if (databaseCache.remove(dbName) != null) { + if (databaseCache.remove(key) != null) { isDatabaseCacheDirty.set(true); } } finally { @@ -738,25 +849,31 @@ public void removeDatabaseFromCache(String dbName) { } } - public List listCachedDatabases() { + public List listCachedDatabases(String catName) { List results = new ArrayList<>(); try { cacheLock.readLock().lock(); - results.addAll(databaseCache.keySet()); + for (String pair : databaseCache.keySet()) { + String[] n = CacheUtils.splitDbName(pair); + if (catName.equals(n[0])) results.add(n[1]); + } } finally { cacheLock.readLock().unlock(); } return results; } - public List listCachedDatabases(String pattern) { + public List listCachedDatabases(String catName, String pattern) { List results = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (String dbName : databaseCache.keySet()) { - dbName = StringUtils.normalizeIdentifier(dbName); - if (CacheUtils.matches(dbName, pattern)) { - results.add(dbName); + for (String pair : databaseCache.keySet()) { + String[] n = CacheUtils.splitDbName(pair); + if (catName.equals(n[0])) { + n[1] = StringUtils.normalizeIdentifier(n[1]); + if (CacheUtils.matches(n[1], pattern)) { + results.add(n[1]); } } } } finally {
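
listCachedDatabases can filter per catalog because every database is stored under a composite key built from catalog and database name. The exact encoding lives in CacheUtils.buildDbKey/splitDbName, which this diff does not show; a sketch of what such a pair of helpers has to guarantee, with the "#" delimiter purely an assumption:

    // Hypothetical composite-key helpers; the real CacheUtils encoding may differ.
    static String buildDbKey(String catName, String dbName) {
      return catName.toLowerCase() + "#" + dbName.toLowerCase(); // delimiter must not occur in names
    }
    static String[] splitDbName(String key) {
      return key.split("#", 2); // [0] = catalog, [1] = database
    }

@@ -768,13 +885,11 @@ public void removeDatabaseFromCache(String dbName) { /** * Replaces the old db object with the new one. * This will add the new database to cache if it does not exist.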
- * @param dbName - * @param newDb */ - public void alterDatabaseInCache(String dbName, Database newDb) { + public void alterDatabaseInCache(String catName, String dbName, Database newDb) { try { cacheLock.writeLock().lock(); - removeDatabaseFromCache(dbName); + removeDatabaseFromCache(catName, dbName); addDatabaseToCache(newDb.deepCopy()); isDatabaseCacheDirty.set(true); } finally { @@ -810,14 +925,15 @@ public int getCachedDatabaseCount() { public void populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add tables that were deleted while we were preparing list for prewarm - if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableCacheKey(dbName, tableName))) { return; } - TableWrapper tblWrapper = createTableWrapper(dbName, tableName, table); + if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return; } + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); if (!table.isSetPartitionKeys() && (tableColStats != null)) { tblWrapper.updateTableColStats(tableColStats.getStatsObj()); } else { @@ -843,12 +959,14 @@ public void populateTableInCache(Table table, ColumnStatistics tableColStats, cacheLock.writeLock().lock(); // 2. Skip overwriting existing table object // (which is present because it was added after prewarm started) - tableCache.putIfAbsent(CacheUtils.buildTableCacheKey(dbName, tableName), tblWrapper); + tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper); } finally { cacheLock.writeLock().unlock(); } } + + public void completeTableCachePrewarm() { try { cacheLock.writeLock().lock(); @@ -859,11 +977,11 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName) { Table t = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { t = CacheUtils.assemble(tblWrapper, this); } @@ -873,11 +991,11 @@ public Table getTableFromCache(String dbName, String tableName) { return t; } - public TableWrapper addTableToCache(String dbName, String tblName, Table tbl) { + public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(dbName, tblName, tbl); - tableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), wrapper); + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); + tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); isTableCacheDirty.set(true); return wrapper; } finally { @@ -885,14 +1003,15 @@ public TableWrapper addTableToCache(String dbName, String tblName, Table tbl) { } }
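
populateTableInCache above relies on two prewarm guards worth spelling out: prewarm runs while regular writes are allowed, so a table dropped mid-prewarm is recorded in tablesDeletedDuringPrewarm and must not be resurrected, and a table (re)created after prewarm started must not be clobbered by the stale prewarm copy, hence putIfAbsent. The two guards in sketch form, using the fields and CacheUtils.buildTableKey shown in this hunk:

    // Prewarm insert guard: never resurrect a dropped table, never clobber a newer entry.
    String key = CacheUtils.buildTableKey(catName, dbName, tableName);
    if (tablesDeletedDuringPrewarm.contains(key)) {
      return;                               // dropped while the prewarm list was being built
    }
    tableCache.putIfAbsent(key, tblWrapper); // keep the copy added after prewarm started

- private TableWrapper createTableWrapper(String dbName, String tblName, Table tbl) { + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl)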
{ TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); - tblCopy.setDbName(StringUtils.normalizeIdentifier(dbName)); - tblCopy.setTableName(StringUtils.normalizeIdentifier(tblName)); + tblCopy.setCatName(normalizeIdentifier(catName)); + tblCopy.setDbName(normalizeIdentifier(dbName)); + tblCopy.setTableName(normalizeIdentifier(tblName)); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { - fs.setName(StringUtils.normalizeIdentifier(fs.getName())); + fs.setName(normalizeIdentifier(fs.getName())); } } if (tbl.getSd() != null) { @@ -907,15 +1026,16 @@ private TableWrapper createTableWrapper(String dbName, String tblName, Table tbl return wrapper; } - public void removeTableFromCache(String dbName, String tblName) { + + public void removeTableFromCache(String catName, String dbName, String tblName) { try { cacheLock.writeLock().lock(); // If table cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back if (!isTableCachePrewarmed) { - tablesDeletedDuringPrewarm.add(CacheUtils.buildTableCacheKey(dbName, tblName)); + tablesDeletedDuringPrewarm.add(CacheUtils.buildTableKey(catName, dbName, tblName)); } - TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); byte[] sdHash = tblWrapper.getSdHash(); if (sdHash != null) { decrSd(sdHash); @@ -926,15 +1046,15 @@ public void removeTableFromCache(String dbName, String tblName) { } } - public void alterTableInCache(String dbName, String tblName, Table newTable) { + public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) { try { cacheLock.writeLock().lock(); - TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(newTable, this); String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); - tableCache.put(CacheUtils.buildTableCacheKey(newDbName, newTblName), tblWrapper); + tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper); isTableCacheDirty.set(true); } } finally { @@ -942,12 +1062,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { } } - public List
listCachedTables(String dbName) { + public List
listCachedTables(String catName, String dbName) { List
tables = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.getTable().getDbName().equals(dbName)) { + if (wrapper.sameDatabase(catName, dbName)) { tables.add(CacheUtils.assemble(wrapper, this)); } } @@ -957,12 +1077,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tables; } - public List listCachedTableNames(String dbName) { + public List listCachedTableNames(String catName, String dbName) { List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.getTable().getDbName().equals(dbName)) { + if (wrapper.sameDatabase(catName, dbName)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); } } @@ -972,13 +1092,13 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public List listCachedTableNames(String dbName, String pattern, short maxTables) { - List tableNames = new ArrayList(); + public List listCachedTableNames(String catName, String dbName, String pattern, short maxTables) { + List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); int count = 0; for (TableWrapper wrapper : tableCache.values()) { - if ((wrapper.getTable().getDbName().equals(dbName)) + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && (maxTables == -1 || count < maxTables)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); @@ -991,12 +1111,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public List listCachedTableNames(String dbName, String pattern, TableType tableType) { - List tableNames = new ArrayList(); + public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType) { + List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if ((wrapper.getTable().getDbName().equals(dbName)) + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && wrapper.getTable().getTableType().equals(tableType.toString())) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); @@ -1008,23 +1128,23 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public void refreshTablesInCache(String dbName, List
tables) { + public void refreshTablesInCache(String catName, String dbName, List
tables) { try { cacheLock.writeLock().lock(); if (isTableCacheDirty.compareAndSet(true, false)) { LOG.debug("Skipping table cache update; the table list we have is dirty."); return; } - Map newTableCache = new HashMap(); + Map newTableCache = new HashMap<>(); for (Table tbl : tables) { String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(tbl, this); } else { - tblWrapper = createTableWrapper(dbName, tblName, tbl); + tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); } - newTableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), tblWrapper); + newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); } tableCache.clear(); tableCache = newTableCache; @@ -1033,12 +1153,12 @@ public void refreshTablesInCache(String dbName, List
tables) { } } - public List getTableColStatsFromCache(String dbName, String tblName, - List colNames) { - List colStatObjs = new ArrayList(); + public List getTableColStatsFromCache( + String catName, String dbName, String tblName, List colNames) { + List colStatObjs = new ArrayList<>(); try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObjs = tblWrapper.getCachedTableColStats(colNames); } @@ -1048,10 +1168,10 @@ public void refreshTablesInCache(String dbName, List
tables) { return colStatObjs; } - public void removeTableColStatsFromCache(String dbName, String tblName, String colName) { + public void removeTableColStatsFromCache(String catName, String dbName, String tblName, String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeTableColStats(colName); } @@ -1060,11 +1180,11 @@ public void removeTableColStatsFromCache(String dbName, String tblName, String c } } - public void updateTableColStatsInCache(String dbName, String tableName, - List colStatsForTable) { + public void updateTableColStatsInCache(String catName, String dbName, String tableName, + List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updateTableColStats(colStatsForTable); } @@ -1073,11 +1193,11 @@ public void updateTableColStatsInCache(String dbName, String tableName, } } - public void refreshTableColStatsInCache(String dbName, String tableName, - List colStatsForTable) { + public void refreshTableColStatsInCache(String catName, String dbName, String tableName, + List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.refreshTableColStats(colStatsForTable); } @@ -1095,18 +1215,19 @@ public int getCachedTableCount() { } } - public List getTableMeta(String dbNames, String tableNames, - List tableTypes) { + public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) { List tableMetas = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (String dbName : listCachedDatabases()) { + for (String dbName : listCachedDatabases(catName)) { if (CacheUtils.matches(dbName, dbNames)) { - for (Table table : listCachedTables(dbName)) { + for (Table table : listCachedTables(catName, dbName)) { if (CacheUtils.matches(table.getTableName(), tableNames)) { if (tableTypes == null || tableTypes.contains(table.getTableType())) { TableMeta metaData = new TableMeta(dbName, table.getTableName(), table.getTableType()); + metaData.setCatName(catName); metaData.setComments(table.getParameters().get("comment")); tableMetas.add(metaData); } @@ -1120,10 +1241,10 @@ public int getCachedTableCount() { return tableMetas; } - public void addPartitionToCache(String dbName, String tblName, Partition part) { + public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartition(part, this); } @@ -1132,10 +1253,10 @@ public void addPartitionToCache(String dbName, String tblName, Partition part) { } } - public void addPartitionsToCache(String dbName, String tblName, List parts) { + public void addPartitionsToCache(String catName, String dbName, String tblName, List parts) { try { 
cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartitions(parts, this); } @@ -1144,12 +1265,12 @@ public void addPartitionsToCache(String dbName, String tblName, List } } - public Partition getPartitionFromCache(String dbName, String tblName, - List partVals) { + public Partition getPartitionFromCache(String catName, String dbName, String tblName, + List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.getPartition(partVals, this); } @@ -1159,11 +1280,11 @@ public Partition getPartitionFromCache(String dbName, String tblName, return part; } - public boolean existPartitionFromCache(String dbName, String tblName, List partVals) { + public boolean existPartitionFromCache(String catName, String dbName, String tblName, List partVals) { boolean existsPart = false; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { existsPart = tblWrapper.containsPartition(partVals); } @@ -1173,12 +1294,12 @@ public boolean existPartitionFromCache(String dbName, String tblName, List partVals) { + public Partition removePartitionFromCache(String catName, String dbName, String tblName, + List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.removePartition(partVals, this); } @@ -1188,11 +1309,11 @@ public Partition removePartitionFromCache(String dbName, String tblName, return part; } - public void removePartitionsFromCache(String dbName, String tblName, - List> partVals) { + public void removePartitionsFromCache(String catName, String dbName, String tblName, + List> partVals) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitions(partVals, this); } @@ -1201,11 +1322,11 @@ public void removePartitionsFromCache(String dbName, String tblName, } } - public List listCachedPartitions(String dbName, String tblName, int max) { + public List listCachedPartitions(String catName, String dbName, String tblName, int max) { List parts = new ArrayList(); try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { parts = tblWrapper.listPartitions(max, this); } @@ -1215,11 +1336,11 @@ public void removePartitionsFromCache(String dbName, String tblName, return parts; } - public void alterPartitionInCache(String dbName, String tblName, List partVals, - Partition newPart) { + public void alterPartitionInCache(String catName, 
String dbName, String tblName, List partVals, + Partition newPart) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartition(partVals, newPart, this); } @@ -1228,11 +1349,11 @@ public void alterPartitionInCache(String dbName, String tblName, List pa } } - public void alterPartitionsInCache(String dbName, String tblName, List> partValsList, - List newParts) { + public void alterPartitionsInCache(String catName, String dbName, String tblName, List> partValsList, + List newParts) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartitions(partValsList, newParts, this); } @@ -1241,10 +1362,10 @@ public void alterPartitionsInCache(String dbName, String tblName, List partitions) { + public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitions(partitions, this); } @@ -1253,11 +1374,11 @@ public void refreshPartitionsInCache(String dbName, String tblName, List partVals, String colName) { + public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, + List partVals, String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitionColStats(partVals, colName); } @@ -1266,11 +1387,11 @@ public void removePartitionColStatsFromCache(String dbName, String tblName, } } - public void updatePartitionColStatsInCache(String dbName, String tableName, - List partVals, List colStatsObjs) { + public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, + List partVals, List colStatsObjs) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } @@ -1279,12 +1400,12 @@ public void updatePartitionColStatsInCache(String dbName, String tableName, } } - public ColumnStatisticsObj getPartitionColStatsFromCache(String dbName, String tblName, - List partVal, String colName) { + public ColumnStatisticsObj getPartitionColStatsFromCache(String catName, String dbName, String tblName, + List partVal, String colName) { ColumnStatisticsObj colStatObj = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null){ colStatObj = tblWrapper.getPartitionColStats(partVal, colName); } @@ -1294,11 +1415,11 @@ public ColumnStatisticsObj 
getPartitionColStatsFromCache(String dbName, String t return colStatObj; } - public void refreshPartitionColStatsInCache(String dbName, String tblName, - List partitionColStats) { + public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName, + List partitionColStats) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitionColStats(partitionColStats); } @@ -1307,11 +1428,11 @@ public void refreshPartitionColStatsInCache(String dbName, String tblName, } } - public List getAggrStatsFromCache(String dbName, String tblName, - List colNames, StatsType statsType) { + public List getAggrStatsFromCache(String catName, String dbName, String tblName, + List colNames, StatsType statsType) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { return tblWrapper.getAggrPartitionColStats(colNames, statsType); } @@ -1321,11 +1442,11 @@ public void refreshPartitionColStatsInCache(String dbName, String tblName, return null; } - public void addAggregateStatsToCache(String dbName, String tblName, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public void addAggregateStatsToCache(String catName, String dbName, String tblName, + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null){ tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); @@ -1335,11 +1456,11 @@ public void addAggregateStatsToCache(String dbName, String tblName, } } - public void refreshAggregateStatsInCache(String dbName, String tblName, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public void refreshAggregateStatsInCache(String catName, String dbName, String tblName, + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); @@ -1390,6 +1511,16 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { return sdCache; } + /** + * This resets the contents of the catalog cache so that we can re-fill it in another test.
+ */ + void resetCatalogCache() { + isCatalogCachePrewarmed = false; + catalogCache.clear(); + catalogsDeletedDuringPrewarm.clear(); + isCatalogCacheDirty.set(false); + } + public long getUpdateCount() { return cacheUpdateCount.get(); } @@ -1398,3 +1529,8 @@ public void incrementUpdateCount() { cacheUpdateCount.incrementAndGet(); } } + + + + +
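For orientation, the net effect of the SharedCache changes above is that every cache operation is now keyed by catalog as well as database and table. A minimal usage sketch, assuming a SharedCache instance named sharedCache, a Table object clicksTable, and the default 'hive' catalog; all names here are invented for the example:

  // Entries are now keyed by (catalog, database, table) instead of (database, table).
  sharedCache.addTableToCache("hive", "web_logs", "clicks", clicksTable);
  Table hit = sharedCache.getTableFromCache("hive", "web_logs", "clicks");
  Table miss = sharedCache.getTableFromCache("other_cat", "web_logs", "clicks"); // null: same db/table, different catalog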
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java new file mode 100644 index 0000000000..be76d937b7 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client.builder; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.thrift.TException; + +public class CatalogBuilder { + private String name, description, location; + + public CatalogBuilder setName(String name) { + this.name = name; + return this; + } + + public CatalogBuilder setDescription(String description) { + this.description = description; + return this; + } + + public CatalogBuilder setLocation(String location) { + this.location = location; + return this; + } + + public Catalog build() throws MetaException { + if (name == null) throw new MetaException("You must name the catalog"); + if (location == null) throw new MetaException("You must give the catalog a location"); + Catalog catalog = new Catalog(name, location); + if (description != null) catalog.setDescription(description); + return catalog; + } + + /** + * Build the catalog object and create it in the metastore. + * @param client metastore client + * @return new catalog object + * @throws TException thrown from the client + */ + public Catalog create(IMetaStoreClient client) throws TException { + Catalog cat = build(); + client.createCatalog(cat); + return cat; + } +}
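Typical use of the new builder looks like this (an illustrative sketch only; client is assumed to be a connected IMetaStoreClient and the names are made up):

  Catalog cat = new CatalogBuilder()
      .setName("test_cat")
      .setLocation("file:/tmp/test_cat")
      .setDescription("catalog for unit tests")  // optional
      .create(client);  // build() followed by IMetaStoreClient.createCatalog()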
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java index 50e779a22b..2e32cbf3c4 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java @@ -17,8 +17,15 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; + +import java.util.ArrayList; +import java.util.List; + /** * Base builder for all types of constraints. Database name, table name, and column name * @param Type of builder extending this. */ abstract class ConstraintBuilder { - protected String dbName, tableName, columnName, constraintName; - protected int keySeq; + protected String catName, dbName, tableName, constraintName; + List columns; protected boolean enable, validate, rely; + private int nextSeq; private T child; protected ConstraintBuilder() { - keySeq = 1; + nextSeq = 1; enable = true; validate = rely = false; + dbName = Warehouse.DEFAULT_DATABASE_NAME; + columns = new ArrayList<>(); } protected void setChild(T child) { this.child = child; } - protected void checkBuildable(String defaultConstraintName) throws MetaException { - if (dbName == null || tableName == null || columnName == null) { - throw new MetaException("You must provide database name, table name, and column name"); + protected void checkBuildable(String defaultConstraintName, Configuration conf) + throws MetaException { + if (tableName == null || columns.isEmpty()) { + throw new MetaException("You must provide table name and columns"); } if (constraintName == null) { - constraintName = dbName + "_" + tableName + "_" + columnName + "_" + defaultConstraintName; + constraintName = tableName + "_" + defaultConstraintName; } + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + } + + protected int getNextSeq() { + return nextSeq++; + } + + public T setCatName(String catName) { + this.catName = catName; + return child; } public T setDbName(String dbName) { @@ -60,14 +81,15 @@ public T setTableName(String tableName) { return child; } - public T setDbAndTableName(Table table) { + public T onTable(Table table) { + this.catName = table.getCatName(); this.dbName = table.getDbName(); this.tableName = table.getTableName(); return child; } - public T setColumnName(String columnName) { - this.columnName = columnName; + public T addColumn(String columnName) { + this.columns.add(columnName); return child; } @@ -76,11 +98,6 @@ public T setConstraintName(String constraintName) { return child; } - public T setKeySeq(int keySeq) { - this.keySeq = keySeq; - return child; - } - public T setEnable(boolean enable) { this.enable = enable; return child; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java index 01693ec0bc..f3d2182a04 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.PrincipalType; @@ -33,11 +37,24 @@ * selects reasonable defaults.
*/ public class DatabaseBuilder { - private String name, description, location; + private String name, description, location, catalogName; private Map params = new HashMap<>(); private String ownerName; private PrincipalType ownerType; + public DatabaseBuilder() { + } + + public DatabaseBuilder setCatalogName(String catalogName) { + this.catalogName = catalogName; + return this; + } + + public DatabaseBuilder setCatalogName(Catalog catalog) { + this.catalogName = catalog.getName(); + return this; + } + public DatabaseBuilder setName(String name) { this.name = name; return this; @@ -73,11 +90,13 @@ public DatabaseBuilder setOwnerType(PrincipalType ownerType) { return this; } - public Database build() throws MetaException { + public Database build(Configuration conf) throws MetaException { if (name == null) throw new MetaException("You must name the database"); + if (catalogName == null) catalogName = MetaStoreUtils.getDefaultCatalog(conf); Database db = new Database(name, description, location, params); + db.setCatalogName(catalogName); try { - if (ownerName != null) ownerName = SecurityUtils.getUser(); + if (ownerName == null) ownerName = SecurityUtils.getUser(); db.setOwnerName(ownerName); if (ownerType == null) ownerType = PrincipalType.USER; db.setOwnerType(ownerType); @@ -86,4 +105,18 @@ public Database build() throws MetaException { throw MetaStoreUtils.newMetaException(e); } } + + /** + * Build the database, create it in the metastore, and then return the db object. + * @param client metastore client + * @param conf configuration object + * @return new database object + * @throws TException comes from {@link #build(Configuration)} or + * {@link IMetaStoreClient#createDatabase(Database)}. + */ + public Database create(IMetaStoreClient client, Configuration conf) throws TException { + Database db = build(conf); + client.createDatabase(db); + return db; + } }
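Usage sketch (illustrative; client and conf are assumed to exist, and when setCatalogName is omitted, build(conf) falls back to MetaStoreUtils.getDefaultCatalog(conf)):

  Database db = new DatabaseBuilder()
      .setName("cached_db")
      .setCatalogName("test_cat")  // omit to default to the 'hive' catalog
      .create(client, conf);       // build(conf) followed by IMetaStoreClient.createDatabase()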
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java index aa9b9f5b62..c4c09dcd4f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; @@ -26,6 +29,7 @@ import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.thrift.TException; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +39,7 @@ * Class for creating Thrift Function objects for tests, and API usage. */ public class FunctionBuilder { - private String dbName = "default"; + private String catName, dbName; private String funcName = null; private String className = null; private String owner = null; @@ -49,7 +53,13 @@ public FunctionBuilder() { ownerType = PrincipalType.USER; createTime = (int) (System.currentTimeMillis() / 1000); funcType = FunctionType.JAVA; - resourceUris = new ArrayList(); + resourceUris = new ArrayList<>(); + dbName = Warehouse.DEFAULT_DATABASE_NAME; + } + + public FunctionBuilder setCatName(String catName) { + this.catName = catName; + return this; } public FunctionBuilder setDbName(String dbName) { @@ -57,8 +67,9 @@ public FunctionBuilder setDbName(String dbName) { return this; } - public FunctionBuilder setDbName(Database db) { + public FunctionBuilder inDb(Database db) { this.dbName = db.getName(); + this.catName = db.getCatalogName(); return this; } @@ -102,7 +113,7 @@ public FunctionBuilder addResourceUri(ResourceUri resourceUri) { return this; } - public Function build() throws MetaException { + public Function build(Configuration conf) throws MetaException { try { if (owner != null) { owner = SecurityUtils.getUser(); @@ -110,7 +121,23 @@ public Function build() throws MetaException { } catch (IOException e) { throw MetaStoreUtils.newMetaException(e); } - return new Function(funcName, dbName, className, owner, ownerType, createTime, funcType, + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + Function f = new Function(funcName, dbName, className, owner, ownerType, createTime, funcType, resourceUris); + f.setCatName(catName); + return f; + } + + /** + * Create the function object in the metastore and return it. + * @param client metastore client + * @param conf configuration + * @return new function object + * @throws TException if thrown by build or the client.
+ */ + public Function create(IMetaStoreClient client, Configuration conf) throws TException { + Function f = build(conf); + client.createFunction(f); + return f; } }
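Usage sketch (illustrative; the function name and class setters are elided from the hunks above and assumed to follow the same set-style pattern as the methods that are shown):

  Function fn = new FunctionBuilder()
      .inDb(db)                           // copies both the catalog and database names from db
      .setName("strip_html")              // assumed setter; not shown in this hunk
      .setClass("org.example.StripHtml")  // assumed setter; not shown in this hunk
      .create(client, conf);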
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java index 32a84acf3a..f61a62c2e3 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore.client.builder; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.ISchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SchemaCompatibility; @@ -27,7 +28,7 @@ public class ISchemaBuilder { private SchemaType schemaType; // required private String name; // required - private String dbName; // required + private String dbName, catName; // required private SchemaCompatibility compatibility; // required private SchemaValidation validationLevel; // required private boolean canEvolve; // required @@ -39,6 +40,7 @@ public ISchemaBuilder() { validationLevel = SchemaValidation.ALL; canEvolve = true; dbName = Warehouse.DEFAULT_DATABASE_NAME; + catName = Warehouse.DEFAULT_CATALOG_NAME; } public ISchemaBuilder setSchemaType(SchemaType schemaType) { @@ -56,6 +58,12 @@ public ISchemaBuilder setDbName(String dbName) { return this; } + public ISchemaBuilder inDb(Database db) { + this.catName = db.getCatalogName(); + this.dbName = db.getName(); + return this; + } + public ISchemaBuilder setCompatibility(SchemaCompatibility compatibility) { this.compatibility = compatibility; return this; @@ -86,7 +94,7 @@ public ISchema build() throws MetaException { throw new MetaException("You must provide a schemaType and name"); } ISchema iSchema = - new ISchema(schemaType, name, dbName, compatibility, validationLevel, canEvolve); + new ISchema(schemaType, name, catName, dbName, compatibility, validationLevel, canEvolve); if (schemaGroup != null) iSchema.setSchemaGroup(schemaGroup); if (description != null) iSchema.setDescription(description); return iSchema; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java index 38e5a8fcb9..d6ee6739f8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java @@ -17,9 +17,14 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.thrift.TException; import java.util.ArrayList; import java.util.HashMap; @@ -31,7 +36,7 @@ * reference; 2. partition values; 3. whatever {@link StorageDescriptorBuilder} requires. */ public class PartitionBuilder extends StorageDescriptorBuilder { - private String dbName, tableName; + private String catName, dbName, tableName; private int createTime, lastAccessTime; private Map partParams; private List values; @@ -40,6 +45,7 @@ public PartitionBuilder() { // Set some reasonable defaults partParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); + dbName = Warehouse.DEFAULT_DATABASE_NAME; super.setChild(this); } @@ -53,9 +59,10 @@ public PartitionBuilder setTableName(String tableName) { return this; } - public PartitionBuilder fromTable(Table table) { + public PartitionBuilder inTable(Table table) { this.dbName = table.getDbName(); this.tableName = table.getTableName(); + this.catName = table.getCatName(); setCols(table.getSd().getCols()); return this; } @@ -92,12 +99,21 @@ public PartitionBuilder addPartParam(String key, String value) { return this; } - public Partition build() throws MetaException { - if (dbName == null || tableName == null) { - throw new MetaException("database name and table name must be provided"); + public Partition build(Configuration conf) throws MetaException { + if (tableName == null) { + throw new MetaException("table name must be provided"); } if (values == null) throw new MetaException("You must provide partition values"); - return new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(), + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + Partition p = new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(), partParams); + p.setCatName(catName); + return p; + } + + public Partition addToTable(IMetaStoreClient client, Configuration conf) throws TException { + Partition p = build(conf); + client.add_partition(p); + return p; } }
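Usage sketch (illustrative; addValue is assumed to be the setter that populates the 'values' list checked by build(conf), as it is not shown in the hunks above):

  Partition p = new PartitionBuilder()
      .inTable(table)          // copies catalog, database, table name, and columns
      .addValue("2018-03-26")  // assumed setter for the partition values; not shown in this hunk
      .addToTable(client, conf);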
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java new file mode 100644 index 0000000000..ec99729a57 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLCheckConstraintBuilder.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client.builder; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; + +import java.util.ArrayList; +import java.util.List; + +public class SQLCheckConstraintBuilder extends ConstraintBuilder { + private String checkExpression; + + public SQLCheckConstraintBuilder() { + super.setChild(this); + } + + public SQLCheckConstraintBuilder setCheckExpression(String checkExpression) { + this.checkExpression = checkExpression; + return this; + } + + public List build(Configuration conf) throws MetaException { + if (checkExpression == null) { + throw new MetaException("check expression must be set"); + } + checkBuildable("check_constraint", conf); + List cc = new ArrayList<>(columns.size()); + for (String column : columns) { + cc.add(new SQLCheckConstraint(catName, dbName, tableName, column, checkExpression, + constraintName, enable, validate, rely)); + } + return cc; + } +}
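Usage sketch (illustrative names; onTable and addColumn are inherited from ConstraintBuilder, and the catalog defaults when unset):

  List<SQLCheckConstraint> cc = new SQLCheckConstraintBuilder()
      .onTable(tbl)
      .addColumn("age")
      .setCheckExpression("age >= 0")
      .build(conf);  // one SQLCheckConstraint object is produced per added column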
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java new file mode 100644 index 0000000000..b24663d0e3 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client.builder; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; + +import java.util.ArrayList; +import java.util.List; + +public class SQLDefaultConstraintBuilder extends ConstraintBuilder { + private Object defaultVal; + + public SQLDefaultConstraintBuilder() { + super.setChild(this); + } + + public SQLDefaultConstraintBuilder setDefaultVal(Object defaultVal) { + this.defaultVal = defaultVal; + return this; + } + + public List build(Configuration conf) throws MetaException { + if (defaultVal == null) { + throw new MetaException("default value must be set"); + } + checkBuildable("default_value", conf); + List dv = new ArrayList<>(columns.size()); + for (String column : columns) { + dv.add(new SQLDefaultConstraint(catName, dbName, tableName, column, + defaultVal.toString(), constraintName, enable, validate, rely)); + } + return dv; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java index a39319a1e4..f5adda1ecd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java @@ -17,21 +17,30 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLForeignKey}. Requires what {@link ConstraintBuilder} requires, plus * primary key * database, table, column and name. 
*/ public class SQLForeignKeyBuilder extends ConstraintBuilder { - private String pkDb, pkTable, pkColumn, pkName; + private String pkDb, pkTable, pkName; + private List pkColumns; private int updateRule, deleteRule; public SQLForeignKeyBuilder() { + super.setChild(this); updateRule = deleteRule = 0; + pkColumns = new ArrayList<>(); + pkDb = Warehouse.DEFAULT_DATABASE_NAME; } public SQLForeignKeyBuilder setPkDb(String pkDb) { @@ -44,8 +53,8 @@ public SQLForeignKeyBuilder setPkTable(String pkTable) { return this; } - public SQLForeignKeyBuilder setPkColumn(String pkColumn) { - this.pkColumn = pkColumn; + public SQLForeignKeyBuilder addPkColumn(String pkColumn) { + pkColumns.add(pkColumn); return this; } @@ -54,11 +63,11 @@ public SQLForeignKeyBuilder setPkName(String pkName) { return this; } - public SQLForeignKeyBuilder setPrimaryKey(SQLPrimaryKey pk) { - pkDb = pk.getTable_db(); - pkTable = pk.getTable_name(); - pkColumn = pk.getColumn_name(); - pkName = pk.getPk_name(); + public SQLForeignKeyBuilder fromPrimaryKey(List pk) { + pkDb = pk.get(0).getTable_db(); + pkTable = pk.get(0).getTable_name(); + for (SQLPrimaryKey pkcol : pk) pkColumns.add(pkcol.getColumn_name()); + pkName = pk.get(0).getPk_name(); return this; } @@ -72,12 +81,23 @@ public SQLForeignKeyBuilder setDeleteRule(int deleteRule) { return this; } - public SQLForeignKey build() throws MetaException { - checkBuildable("foreign_key"); - if (pkDb == null || pkTable == null || pkColumn == null || pkName == null) { - throw new MetaException("You must provide the primary key database, table, column, and name"); + public List build(Configuration conf) throws MetaException { + checkBuildable("to_" + pkTable + "_foreign_key", conf); + if (pkTable == null || pkColumns.isEmpty() || pkName == null) { + throw new MetaException("You must provide the primary key table, columns, and name"); + } + if (columns.size() != pkColumns.size()) { + throw new MetaException("The number of foreign columns must match the number of primary key" + + " columns"); + } + List fk = new ArrayList<>(columns.size()); + for (int i = 0; i < columns.size(); i++) { + SQLForeignKey keyCol = new SQLForeignKey(pkDb, pkTable, pkColumns.get(i), dbName, tableName, + columns.get(i), getNextSeq(), updateRule, deleteRule, constraintName, pkName, enable, + validate, rely); + keyCol.setCatName(catName); + fk.add(keyCol); } - return new SQLForeignKey(pkDb, pkTable, pkColumn, dbName, tableName, columnName, keySeq, - updateRule, deleteRule, constraintName, pkName, enable, validate, rely); + return fk; } }
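The switch from a single column to column lists is what enables multi-column keys; build(conf) now returns one thrift object per column. Usage sketch (illustrative; parent and child are assumed Table objects):

  List<SQLPrimaryKey> pk = new SQLPrimaryKeyBuilder()
      .onTable(parent)
      .addColumn("id")
      .addColumn("region")  // key_seq is assigned per column via getNextSeq()
      .build(conf);
  List<SQLForeignKey> fk = new SQLForeignKeyBuilder()
      .onTable(child)
      .addColumn("parent_id")
      .addColumn("parent_region")  // must match the primary key column count
      .fromPrimaryKey(pk)          // copies the pk table, columns, and constraint name
      .build(conf);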
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java index 77d1e497c5..497032eebc 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLNotNullConstraint}. Only requires what {@link ConstraintBuilder} requires. */ @@ -29,9 +33,20 @@ public SQLNotNullConstraintBuilder() { super.setChild(this); } - public SQLNotNullConstraint build() throws MetaException { - checkBuildable("not_null_constraint"); - return new SQLNotNullConstraint(dbName, tableName, columnName, constraintName, enable, - validate, rely); + public SQLNotNullConstraintBuilder setColName(String colName) { + assert columns.isEmpty(); + columns.add(colName); + return this; + } + + public List build(Configuration conf) throws MetaException { + checkBuildable("not_null_constraint", conf); + List uc = new ArrayList<>(columns.size()); + for (String column : columns) { + SQLNotNullConstraint c = new SQLNotNullConstraint(catName, dbName, tableName, column, + constraintName, enable, validate, rely); + uc.add(c); + } + return uc; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java index 9000f86167..40f74bd6d0 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLPrimaryKey}. Only requires what {@link ConstraintBuilder} requires. */ @@ -34,9 +38,15 @@ public SQLPrimaryKeyBuilder setPrimaryKeyName(String name) { return setConstraintName(name); } - public SQLPrimaryKey build() throws MetaException { - checkBuildable("primary_key"); - return new SQLPrimaryKey(dbName, tableName, columnName, keySeq, constraintName, enable, - validate, rely); + public List build(Configuration conf) throws MetaException { + checkBuildable("primary_key", conf); + List pk = new ArrayList<>(columns.size()); + for (String colName : columns) { + SQLPrimaryKey keyCol = new SQLPrimaryKey(dbName, tableName, colName, getNextSeq(), + constraintName, enable, validate, rely); + keyCol.setCatName(catName); + pk.add(keyCol); + } + return pk; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java index 640e9d15c8..138ee158cd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLUniqueConstraint}. Only requires what {@link ConstraintBuilder} requires.
*/ @@ -29,9 +33,14 @@ public SQLUniqueConstraintBuilder() { super.setChild(this); } - public SQLUniqueConstraint build() throws MetaException { - checkBuildable("unique_constraint"); - return new SQLUniqueConstraint(dbName, tableName, columnName, keySeq, constraintName, enable, - validate, rely); + public List build(Configuration conf) throws MetaException { + checkBuildable("unique_constraint", conf); + List uc = new ArrayList<>(columns.size()); + for (String column : columns) { + SQLUniqueConstraint c = new SQLUniqueConstraint(catName, dbName, tableName, column, getNextSeq(), + constraintName, enable, validate, rely); + uc.add(c); + } + return uc; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java index ceb0f49a86..521be3e383 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java @@ -23,8 +23,11 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersion; import org.apache.hadoop.hive.metastore.api.SchemaVersionState; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + public class SchemaVersionBuilder extends SerdeAndColsBuilder { - private String schemaName, dbName; // required + private String schemaName, dbName, catName; // required private int version; // required private long createdAt; // required private SchemaVersionState state; // optional @@ -34,6 +37,8 @@ private String name; // optional public SchemaVersionBuilder() { + catName = DEFAULT_CATALOG_NAME; + dbName = DEFAULT_DATABASE_NAME; createdAt = System.currentTimeMillis() / 1000; version = -1; super.setChild(this); @@ -50,6 +55,7 @@ public SchemaVersionBuilder setDbName(String dbName) { } public SchemaVersionBuilder versionOf(ISchema schema) { + this.catName = schema.getCatName(); this.dbName = schema.getDbName(); this.schemaName = schema.getName(); return this; @@ -92,11 +98,11 @@ public SchemaVersionBuilder setName(String name) { } public SchemaVersion build() throws MetaException { - if (schemaName == null || dbName == null || version < 0) { - throw new MetaException("You must provide the database name, schema name, and schema version"); + if (schemaName == null || version < 0) { + throw new MetaException("You must provide the schema name and schema version"); } SchemaVersion schemaVersion = - new SchemaVersion(new ISchemaName(dbName, schemaName), version, createdAt, getCols()); + new SchemaVersion(new ISchemaName(catName, dbName, schemaName), version, createdAt, getCols()); if (state != null) schemaVersion.setState(state); if (description != null) schemaVersion.setDescription(description); if (schemaText != null) schemaVersion.setSchemaText(schemaText);
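Usage sketch (illustrative; setVersion and the column helpers are elided from the hunks above and assumed to exist on the builder or its SerdeAndColsBuilder parent):

  SchemaVersion sv = new SchemaVersionBuilder()
      .versionOf(schema)   // copies catalog, database, and schema name from an ISchema
      .setVersion(1)       // assumed setter for the 'version' field; not shown in this hunk
      .addCol("a", "int")  // assumed helper from SerdeAndColsBuilder
      .build();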
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java index 2b9f816960..79ef7debcd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java @@ -17,48 +17,69 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.thrift.TException; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * Build a {@link Table}. The database name and table name must be provided, plus whatever is * needed by the underlying {@link StorageDescriptorBuilder}. */ public class TableBuilder extends StorageDescriptorBuilder { - private String dbName, tableName, owner, viewOriginalText, viewExpandedText, type; + private String catName, dbName, tableName, owner, viewOriginalText, viewExpandedText, type, + mvValidTxnList; private List partCols; private int createTime, lastAccessTime, retention; private Map tableParams; private boolean rewriteEnabled, temporary; + private Set mvReferencedTables; + public TableBuilder() { // Set some reasonable defaults + dbName = Warehouse.DEFAULT_DATABASE_NAME; tableParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); retention = 0; partCols = new ArrayList<>(); type = TableType.MANAGED_TABLE.name(); + mvReferencedTables = new HashSet<>(); + temporary = false; super.setChild(this); } + public TableBuilder setCatName(String catName) { + this.catName = catName; + return this; + } + public TableBuilder setDbName(String dbName) { this.dbName = dbName; return this; } - public TableBuilder setDbName(Database db) { + public TableBuilder inDb(Database db) { this.dbName = db.getName(); + this.catName = db.getCatalogName(); return this; } @@ -139,9 +160,19 @@ public TableBuilder setTemporary(boolean temporary) { return this; } - public Table build() throws MetaException { - if (dbName == null || tableName == null) { - throw new MetaException("You must set the database and table name"); + public TableBuilder addMaterializedViewReferencedTable(String tableName) { + mvReferencedTables.add(tableName); + return this; + } + + public TableBuilder setMaterializedViewValidTxnList(ValidTxnList validTxnList) { + mvValidTxnList = validTxnList.writeToString(); + return this; + } + + public Table build(Configuration conf) throws MetaException { + if (tableName == null) { + throw new MetaException("You must set the table name"); } if (owner == null) { try { @@ -150,15 +181,24 @@ public Table build() throws MetaException { throw MetaStoreUtils.newMetaException(e); } } + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, buildSd(), partCols, tableParams, viewOriginalText, viewExpandedText, type); - if (rewriteEnabled) { t.setRewriteEnabled(true); } - if (temporary) { t.setTemporary(temporary); + if (rewriteEnabled) t.setRewriteEnabled(true); + if (temporary)
t.setTemporary(temporary); + t.setCatName(catName); + if (!mvReferencedTables.isEmpty()) { + CreationMetadata cm = new CreationMetadata(catName, dbName, tableName, mvReferencedTables); + if (mvValidTxnList != null) cm.setValidTxnList(mvValidTxnList); + t.setCreationMetadata(cm); } return t; } + public Table create(IMetaStoreClient client, Configuration conf) throws TException { + Table t = build(conf); + client.createTable(t); + return t; + } + } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index b8976ed953..995137f967 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -306,6 +306,12 @@ public static ConfVars getMetaConf(String name) { CAPABILITY_CHECK("metastore.client.capability.check", "hive.metastore.client.capability.check", true, "Whether to check client capabilities for potentially breaking API usage."), + CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive", + "The default catalog to use when a catalog is not specified. Default is 'hive' (the " + + "default catalog)."), + CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs", + "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " + + "(the default catalog). Empty string means all catalogs will be cached."), CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay", "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS, "Number of seconds for the client to wait between consecutive connection attempts"), diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java new file mode 100644 index 0000000000..e667277870 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class CreateCatalogEvent extends ListenerEvent { + + private final Catalog cat; + + public CreateCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) { + super(status, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java new file mode 100644 index 0000000000..67c6d51b86 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class DropCatalogEvent extends ListenerEvent { + + private final Catalog cat; + + public DropCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) { + super(status, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java index 4c5918f1c3..ccd968b01d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java @@ -26,17 +26,23 @@ @InterfaceStability.Stable public class DropConstraintEvent extends ListenerEvent { + private final String catName; private final String dbName; private final String tableName; private final String constraintName; - public DropConstraintEvent(String dbName, String tableName, String constraintName, + public DropConstraintEvent(String catName, String dbName, String tableName, String constraintName, boolean status, IHMSHandler handler) { super(status, handler); + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.constraintName = constraintName; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java index b963f78c06..aa014e9317 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java @@ -55,12 +55,13 @@ * @param status status of insert, true = success, false = failure * @param handler handler that is firing the event */ - public InsertEvent(String db, String table, List partVals, + public InsertEvent(String catName, String db, String table, List partVals, InsertEventRequestData insertData, boolean status, IHMSHandler handler) throws MetaException, NoSuchObjectException { super(status, handler); GetTableRequest req = new GetTableRequest(db, table); + req.setCatName(catName); // TODO MS-SPLIT Switch this back once HiveMetaStoreClient is moved. 
//req.setCapabilities(HiveMetaStoreClient.TEST_VERSION); req.setCapabilities(new ClientCapabilities( @@ -68,7 +69,8 @@ public InsertEvent(String db, String table, List partVals, try { this.tableObj = handler.get_table_req(req).getTable(); if (partVals != null) { - this.ptnObj = handler.get_partition(db, table, partVals); + this.ptnObj = handler.get_partition(MetaStoreUtils.prependNotNullCatToDbName(catName, db), + table, partVals); } else { this.ptnObj = null; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java new file mode 100644 index 0000000000..96aa22c7e4 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreCreateCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreCreateCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.CREATE_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java new file mode 100644 index 0000000000..0e01ccd707 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreDropCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreDropCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.DROP_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java index 7ddb8fe758..b45a537755 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java @@ -50,7 +50,10 @@ ALTER_SCHEMA_VERSION, DROP_SCHEMA_VERSION, READ_ISCHEMA, - READ_SCHEMA_VERSION + READ_SCHEMA_VERSION, + CREATE_CATALOG, + DROP_CATALOG, + READ_CATALOG } private final PreEventType eventType; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java index 999ec31f65..a380301867 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java @@ -28,18 +28,24 @@ @InterfaceStability.Stable public class PreLoadPartitionDoneEvent extends PreEventContext { + private final String catName; private final String dbName; private final String tableName; private final Map partSpec; - public PreLoadPartitionDoneEvent(String dbName, String tableName, + public PreLoadPartitionDoneEvent(String catName, String dbName, String tableName, Map partSpec, IHMSHandler handler) { super(PreEventType.LOAD_PARTITION_DONE, handler); + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partSpec = partSpec; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java new file mode 100644 index 0000000000..3f1afdfe54 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreReadCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreReadCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.READ_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java new file mode 100644 index 0000000000..cbb0f4e245 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +public abstract class CreateCatalogMessage extends EventMessage { + + protected CreateCatalogMessage() { + super(EventType.CREATE_CATALOG); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java new file mode 100644 index 0000000000..0e731ce477 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +public abstract class DropCatalogMessage extends EventMessage { + + protected DropCatalogMessage() { + super(EventType.DROP_CATALOG); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java index 8578d4aec9..3cbfa553ed 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java @@ -53,7 +53,9 @@ DROP_ISCHEMA(MessageFactory.DROP_ISCHEMA_EVENT), ADD_SCHEMA_VERSION(MessageFactory.ADD_SCHEMA_VERSION_EVENT), ALTER_SCHEMA_VERSION(MessageFactory.ALTER_SCHEMA_VERSION_EVENT), - DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT); + DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT), + CREATE_CATALOG(MessageFactory.CREATE_CATALOG_EVENT), + DROP_CATALOG(MessageFactory.DROP_CATALOG_EVENT); private String typeString; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java index 5976c489c7..ab93f82e1d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.messaging; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -65,6 +66,8 @@ public static final String ADD_SCHEMA_VERSION_EVENT = "ADD_SCHEMA_VERSION"; public static final String ALTER_SCHEMA_VERSION_EVENT = "ALTER_SCHEMA_VERSION"; public static final String DROP_SCHEMA_VERSION_EVENT = "DROP_SCHEMA_VERSION"; + public static final String CREATE_CATALOG_EVENT = "CREATE_CATALOG"; + public static final String DROP_CATALOG_EVENT = "DROP_CATALOG"; private static MessageFactory instance = null; @@ -276,4 +279,8 @@ public abstract InsertMessage buildInsertMessage(Table tableObj, Partition ptnOb */ public abstract DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, String constraintName); + + public abstract CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog); + + public abstract DropCatalogMessage buildDropCatalogMessage(Catalog catalog); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java new file mode 100644 index 0000000000..8a26764651 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.CreateCatalogMessage; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONCreateCatalogMessage extends CreateCatalogMessage { + + @JsonProperty + String server, servicePrincipal, catalog; + + @JsonProperty + Long timestamp; + + /** + * Required for Jackson + */ + public JSONCreateCatalogMessage() { + + } + + public JSONCreateCatalogMessage(String server, String servicePrincipal, String catalog, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.catalog = catalog; + this.timestamp = timestamp; + } + + @Override + public String getDB() { + return null; + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + public String getCatalog() { + return catalog; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } + catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java new file mode 100644 index 0000000000..58e95f4e1f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.DropCatalogMessage; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONDropCatalogMessage extends DropCatalogMessage { + + @JsonProperty + String server, servicePrincipal, catalog; + + @JsonProperty + Long timestamp; + + public JSONDropCatalogMessage() { + + } + + public JSONDropCatalogMessage(String server, String servicePrincipal, String catalog, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.catalog = catalog; + this.timestamp = timestamp; + } + + @Override + public String getDB() { + return null; + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + public String getCatalog() { + return catalog; + } + + @Override + public Long getTimestamp() { + return timestamp; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java index 4f03a27ed7..0fc53870e9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java @@ -28,6 +28,7 @@ import com.google.common.collect.Iterables; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.NotificationEvent; @@ -45,9 +46,11 @@ import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateCatalogMessage; import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.DropCatalogMessage; import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; @@ -190,6 +193,16 @@ public DropConstraintMessage buildDropConstraintMessage(String dbName, String ta constraintName, now()); } + @Override + public CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog) { + return new JSONCreateCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + } + + @Override + public DropCatalogMessage buildDropCatalogMessage(Catalog catalog) { + return new JSONDropCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + } + private long now() { return System.currentTimeMillis() / 1000; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java new file mode 100644 index 0000000000..e82cb4322f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.model; + +public class MCatalog { + private String name; + private String description; + private String locationUri; + + public MCatalog() { + + } + + public MCatalog(String name, String description, String locationUri) { + this.name = name; + this.description = description; + this.locationUri = locationUri; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getLocationUri() { + return locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java index 1133cb1242..66b5d48e90 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java @@ -27,6 +27,7 @@ */ public class MCreationMetadata { + private String catalogName; private String dbName; private String tblName; private Set tables; @@ -35,8 +36,9 @@ public MCreationMetadata() { } - public MCreationMetadata(String dbName, String tblName, + public MCreationMetadata(String catName, String dbName, String tblName, Set tables, String txnList) { + this.catalogName = catName; this.dbName = dbName; this.tblName = tblName; this.tables = tables; @@ -59,6 +61,14 @@ public void setTxnList(String txnList) { this.txnList = txnList; } + public String getCatalogName() { + return catalogName; + } + + public void setCatalogName(String catName) { + this.catalogName = catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java index e8034ce0e7..fa30330e78 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java @@ -34,6 +34,7 @@ private Map parameters; private String ownerName; private String ownerType; + private String catalogName; /** * Default construction to keep jpox/jdo happy @@ -46,12 +47,13 @@ public MDatabase() {} * @param locationUri Location of the database in the warehouse * @param description Comment describing the database */ - public MDatabase(String name, String locationUri, String description, + public MDatabase(String catalogName, String name, String locationUri, String description, Map parameters) { this.name = name; this.locationUri = locationUri; this.description = description; this.parameters = parameters; + this.catalogName = catalogName; } /** @@ -125,4 +127,12 @@ public String getOwnerType() { public void setOwnerType(String ownerType) { this.ownerType = ownerType; } + + public String getCatalogName() { + return 
catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java index 1b1f7fdf85..60914aea77 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java @@ -22,6 +22,7 @@ private long eventId; // This is not the datanucleus id, but the id assigned by the sequence private int eventTime; private String eventType; + private String catalogName; private String dbName; private String tableName; private String message; @@ -30,10 +31,11 @@ public MNotificationLog() { } - public MNotificationLog(int eventId, String eventType, String dbName, String tableName, + public MNotificationLog(int eventId, String eventType, String catName, String dbName, String tableName, String message) { this.eventId = eventId; this.eventType = eventType; + this.catalogName = catName; this.dbName = dbName; this.tableName = tableName; this.message = message; @@ -72,6 +74,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatalogName() { + return catalogName; + } + + public void setCatalogName(String catName) { + this.catalogName = catName; + } + public String getTableName() { return tableName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java index f7ef6fc55a..50d9c5b0cf 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java @@ -34,6 +34,7 @@ private MPartition partition; + private String catName; private String dbName; private String tableName; private String partitionName; @@ -137,6 +138,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatName() { + return catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + public MPartition getPartition() { return partition; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java index 50c5045583..d0cc51a3fc 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java @@ -21,6 +21,8 @@ public class MPartitionEvent { + private String catalogName; + private String dbName; private String tblName; @@ -31,8 +33,9 @@ private int eventType; - public MPartitionEvent(String dbName, String tblName, String partitionName, int eventType) { + public MPartitionEvent(String catName, String dbName, String tblName, String partitionName, int eventType) { super(); + this.catalogName = catName; this.dbName = dbName; this.tblName = tblName; this.partName = partitionName; @@ -42,6 +45,10 @@ public MPartitionEvent(String dbName, String tblName, String partitionName, int public MPartitionEvent() {} + public void setCatalogName(String catName) { + this.catalogName = catName; + } + /** * 
@param dbName the dbName to set */ diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java index ec613179e4..731cd6f7fa 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java @@ -33,6 +33,7 @@ public class MTableColumnStatistics { private MTable table; + private String catName; private String dbName; private String tableName; private String colName; @@ -151,6 +152,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatName() { + return catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) { this.numTrues = numTrues; this.numFalses = numFalses; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java index e34335d3e1..92813b9eb8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java @@ -26,11 +26,14 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + /** * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy. */ public class CompositePartitionSpecProxy extends PartitionSpecProxy { + private String catName; private String dbName; private String tableName; private List partitionSpecs; @@ -40,10 +43,12 @@ protected CompositePartitionSpecProxy(List partitionSpecs) { this.partitionSpecs = partitionSpecs; if (partitionSpecs.isEmpty()) { + catName = null; dbName = null; tableName = null; } else { + catName = partitionSpecs.get(0).getCatName(); dbName = partitionSpecs.get(0).getDbName(); tableName = partitionSpecs.get(0).getTableName(); this.partitionSpecProxies = new ArrayList<>(partitionSpecs.size()); @@ -57,7 +62,15 @@ protected CompositePartitionSpecProxy(List partitionSpecs) { assert isValid() : "Invalid CompositePartitionSpecProxy!"; } + @Deprecated protected CompositePartitionSpecProxy(String dbName, String tableName, List partitionSpecs) { + this(DEFAULT_CATALOG_NAME, dbName, tableName, partitionSpecs); + + } + + protected CompositePartitionSpecProxy(String catName, String dbName, String tableName, + List partitionSpecs) { + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partitionSpecs = partitionSpecs; @@ -146,6 +159,11 @@ public Partition getCurrent() { } @Override + public String getCatName() { + return composite.getCatName(); + } + + @Override public String getDbName() { return composite.dbName; } @@ -182,6 +200,15 @@ public void setCreateTime(long time) { } @Override + public void setCatName(String catName) { + this.catName = catName; + for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) { + partSpecProxy.setCatName(catName); + } + + } + + @Override public void setDbName(String dbName) { this.dbName = dbName; for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) { @@ -198,6 +225,11 @@ public void 
setTableName(String tableName) { } @Override + public String getCatName() { + return catName; + } + + @Override public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java index 7b0550bfc1..6bd29d0211 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java @@ -40,6 +40,11 @@ protected PartitionListComposingSpecProxy(PartitionSpec partitionSpec) { } @Override + public String getCatName() { + return partitionSpec.getCatName(); + } + + @Override public String getDbName() { return partitionSpec.getDbName(); } @@ -65,6 +70,14 @@ public int size() { } @Override + public void setCatName(String catName) { + partitionSpec.setCatName(catName); + for (Partition partition : partitionSpec.getPartitionList().getPartitions()) { + partition.setCatName(catName); + } + } + + @Override public void setDbName(String dbName) { partitionSpec.setDbName(dbName); for (Partition partition : partitionSpec.getPartitionList().getPartitions()) { @@ -118,6 +131,11 @@ public Partition getCurrent() { } @Override + public String getCatName() { + return partitionSpecProxy.getCatName(); + } + + @Override public String getDbName() { return partitionSpecProxy.getDbName(); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java index 2640a241ab..ff2dea15fa 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java @@ -37,6 +37,12 @@ public abstract int size(); /** + * Set catalog name. + * @param catName catalog name. + */ + public abstract void setCatName(String catName); + + /** * Setter for name of the DB. * @param dbName The name of the DB. */ @@ -49,6 +55,12 @@ public abstract void setTableName(String tableName); /** + * Get catalog name. + * @return catalog name. + */ + public abstract String getCatName(); + + /** * Getter for name of the DB. * @return The name of the DB. */ @@ -131,6 +143,12 @@ public static PartitionSpecProxy get(List partitionSpecs) { Partition getCurrent(); /** + * Get the catalog name. + * @return catalog name. + */ + String getCatName(); + + /** * Getter for the name of the DB. * @return Name of the DB. 
*/ @@ -184,6 +202,7 @@ public static PartitionSpecProxy get(List partitionSpecs) { public SimplePartitionWrapperIterator(Partition partition) {this.partition = partition;} @Override public Partition getCurrent() { return partition; } + @Override public String getCatName() { return partition.getCatName(); } @Override public String getDbName() { return partition.getDbName(); } @Override public String getTableName() { return partition.getTableName(); } @Override public Map getParameters() { return partition.getParameters(); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java index 36b05f7153..61e00ea0a5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java @@ -49,6 +49,11 @@ public int size() { } @Override + public void setCatName(String catName) { + partitionSpec.setCatName(catName); + } + + @Override public void setDbName(String dbName) { partitionSpec.setDbName(dbName); } @@ -59,6 +64,11 @@ public void setTableName(String tableName) { } @Override + public String getCatName() { + return partitionSpec.getCatName(); + } + + @Override public String getDbName() { return partitionSpec.getDbName(); } @@ -121,7 +131,7 @@ public Partition getCurrent() { StorageDescriptor partSD = new StorageDescriptor(pSpec.getSd()); partSD.setLocation(partSD.getLocation() + partWithoutSD.getRelativePath()); - return new Partition( + Partition p = new Partition( partWithoutSD.getValues(), partitionSpecWithSharedSDProxy.partitionSpec.getDbName(), partitionSpecWithSharedSDProxy.partitionSpec.getTableName(), @@ -130,6 +140,13 @@ public Partition getCurrent() { partSD, partWithoutSD.getParameters() ); + p.setCatName(partitionSpecWithSharedSDProxy.partitionSpec.getCatName()); + return p; + } + + @Override + public String getCatName() { + return partitionSpecWithSharedSDProxy.partitionSpec.getCatName(); } @Override diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java index 7f4d9b0374..9cdf271b05 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java @@ -46,6 +46,8 @@ private static final String tableName = "internal_smoke_test_table"; private static final String partValue = "internal_smoke_test_val1"; + private static Configuration conf; + private SmokeTest() { } @@ -63,25 +65,22 @@ private void runTest(IMetaStoreClient client) throws TException { Database db = new DatabaseBuilder() .setName(dbName) .setLocation(dbDir.getAbsolutePath()) - .build(); - client.createDatabase(db); + .create(client, conf); LOG.info("Going to create table " + tableName); Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tableName) .addCol("col1", ColumnType.INT_TYPE_NAME) .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME) .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(table); + .create(client, conf); LOG.info("Going to create partition with value " + partValue); Partition part = new PartitionBuilder() - .fromTable(table) + 
.inTable(table) .addValue("val1") - .build(); - client.add_partition(part); + .addToTable(client, conf); LOG.info("Going to list the partitions"); List<Partition> parts = client.listPartitions(dbName, tableName, (short)-1); @@ -96,7 +95,7 @@ private void runTest(IMetaStoreClient client) throws TException { public static void main(String[] args) throws Exception { SmokeTest test = new SmokeTest(); - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); IMetaStoreClient client = new HiveMetaStoreClient(conf); test.runTest(client); } }
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index e373753cbc..723b6f89fe 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -118,6 +118,28 @@ protected DateFormat initialValue() { private static final Charset ENCODING = StandardCharsets.UTF_8; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); + // The following two are public for any external users who wish to use them. + /** + * This character is used to mark a database name as having a catalog name prepended. This + * marker should be placed first in the String to make it easy to determine that this has both + * a catalog and a database name. @ is chosen as it is not used in regular expressions. This + * is only intended for use when making old Thrift calls that do not support catalog names. + */ + public static final char CATALOG_DB_THRIFT_NAME_MARKER = '@'; + + /** + * This String is used to separate the catalog name from the database name. This should only + * be used in Strings that are prepended with {@link #CATALOG_DB_THRIFT_NAME_MARKER}. # is + * chosen because it is not used in regular expressions. This is only intended for use when + * making old Thrift calls that do not support catalog names. + */ + public static final String CATALOG_DB_SEPARATOR = "#"; + + /** + * Mark a database as being empty (as distinct from null). + */ + public static final String DB_EMPTY_MARKER = "!"; + // Right now we only support one special character '/'. // More special characters can be added accordingly in the future.
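The three markers above define a compact encoding, @<catalog>#<database>, that lets catalog-aware callers tunnel a catalog name through older Thrift calls that only carry a database name. The prepend helpers added to this file a few hunks below implement the write side of that scheme. A minimal illustration, not part of the patch, assuming the stock metastore.catalog.default value of 'hive':

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
  import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

  public class CatalogNameEncodingDemo {
    public static void main(String[] args) {
      Configuration conf = MetastoreConf.newMetastoreConf();
      // Catalog known up front: no Configuration lookup is needed.
      System.out.println(MetaStoreUtils.prependNotNullCatToDbName("hive", "default")); // @hive#default
      // Catalog unknown: falls back to metastore.catalog.default from conf.
      System.out.println(MetaStoreUtils.prependCatalogToDbName(null, "", conf));       // @hive#!  (empty db name)
      System.out.println(MetaStoreUtils.prependCatalogToDbName(null, null, conf));     // @hive#   (null db name)
    }
  }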
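The read side is parseDbName, also added below, which splits a marked name back into a {catalog, database} pair and assumes the configured default catalog for plain names. A companion sketch under the same assumptions; web_logs is a hypothetical database name:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
  import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

  public class CatalogNameParsingDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = MetastoreConf.newMetastoreConf();
      // A marked name splits into its catalog and database parts.
      String[] parts = MetaStoreUtils.parseDbName("@hive#default", conf);
      System.out.println(parts[MetaStoreUtils.CAT_NAME]); // hive
      System.out.println(parts[MetaStoreUtils.DB_NAME]);  // default
      // A bare database name picks up the default catalog from
      // metastore.catalog.default (which itself defaults to 'hive').
      parts = MetaStoreUtils.parseDbName("web_logs", conf);
      System.out.println(parts[MetaStoreUtils.CAT_NAME]); // hive
      System.out.println(parts[MetaStoreUtils.DB_NAME]);  // web_logs
    }
  }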
// NOTE: @@ -217,7 +239,7 @@ public static MetaException newMetaException(String errorMessage, Exception e) { // Given a list of partStats, this function will give you an aggr stats public static List<ColumnStatisticsObj> aggrPartitionStats(List<ColumnStatistics> partStats, - String dbName, String tableName, List<String> partNames, List<String> colNames, + String catName, String dbName, String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap = @@ -237,12 +259,12 @@ public static MetaException newMetaException(String errorMessage, Exception e) { new ArrayList<ColStatsObjWithSourceInfo>()); } colStatsMap.get(aliasToAggregator.get(obj.getColName())) - .add(new ColStatsObjWithSourceInfo(obj, dbName, tableName, partName)); + .add(new ColStatsObjWithSourceInfo(obj, catName, dbName, tableName, partName)); } } if (colStatsMap.size() < 1) { - LOG.debug("No stats data found for: dbName= {}, tblName= {}, partNames= {}, colNames= {}", - dbName, tableName, partNames, colNames); + LOG.debug("No stats data found for: tblName= {}, partNames= {}, colNames= {}", + Warehouse.getCatalogQualifiedTableName(catName, dbName, tableName), partNames, colNames); return new ArrayList<ColumnStatisticsObj>(); } return aggrPartitionStats(colStatsMap, partNames, areAllPartsFound, @@ -1618,13 +1640,15 @@ public static WMPoolSchedulingPolicy parseSchedulingPolicy(String schedulingPoli // ColumnStatisticsObj with info about its db, table, partition (if table is partitioned) public static class ColStatsObjWithSourceInfo { private final ColumnStatisticsObj colStatsObj; + private final String catName; private final String dbName; private final String tblName; private final String partName; - public ColStatsObjWithSourceInfo(ColumnStatisticsObj colStatsObj, String dbName, String tblName, + public ColStatsObjWithSourceInfo(ColumnStatisticsObj colStatsObj, String catName, String dbName, String tblName, String partName) { this.colStatsObj = colStatsObj; + this.catName = catName; this.dbName = dbName; this.tblName = tblName; this.partName = partName; @@ -1634,6 +1658,10 @@ public ColumnStatisticsObj getColStatsObj() { return colStatsObj; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } @@ -1646,4 +1674,105 @@ public String getPartName() { return partName; } } + + private static boolean hasCatalogName(String dbName) { + return dbName != null && dbName.length() > 0 && + dbName.charAt(0) == CATALOG_DB_THRIFT_NAME_MARKER; + } + + /** + * Given a catalog name and database name, cram them together into one string. This method can + * be used if you do not know the catalog name, in which case the default catalog will be + * retrieved from the conf object. The resulting string can be parsed apart again via + * {@link #parseDbName(String, Configuration)}. + * @param catalogName catalog name, can be null if not known. + * @param dbName database name, can be null or empty. + * @param conf configuration object, used to determine default catalog if catalogName is null + * @return one string that contains both.
+ */ + public static String prependCatalogToDbName(@Nullable String catalogName, @Nullable String dbName, + Configuration conf) { + if (catalogName == null) catalogName = getDefaultCatalog(conf); + StringBuilder buf = new StringBuilder() + .append(CATALOG_DB_THRIFT_NAME_MARKER) + .append(catalogName) + .append(CATALOG_DB_SEPARATOR); + if (dbName != null) { + if (dbName.isEmpty()) buf.append(DB_EMPTY_MARKER); + else buf.append(dbName); + } + return buf.toString(); + } + + /** + * Given a catalog name and database name, cram them together into one string. These can be + * parsed apart again via {@link #parseDbName(String, Configuration)}. + * @param catalogName catalog name. This cannot be null. If this might be null use + * {@link #prependCatalogToDbName(String, String, Configuration)} instead. + * @param dbName database name. + * @return one string that contains both. + */ + public static String prependNotNullCatToDbName(String catalogName, String dbName) { + assert catalogName != null; + return prependCatalogToDbName(catalogName, dbName, null); + } + + /** + * Prepend the configured default catalog onto the database name. + * @param dbName database name + * @param conf configuration object, used to determine default catalog + * @return one string with the default catalog name prepended. + */ + public static String prependCatalogToDbName(String dbName, Configuration conf) { + return prependCatalogToDbName(null, dbName, conf); + } + + private final static String[] nullCatalogAndDatabase = {null, null}; + + /** + * Parse the catalog name out of the database name. If no catalog name is present then the + * default catalog (as set in configuration file) will be assumed. + * @param dbName name of the database. This may or may not contain the catalog name. + * @param conf configuration object, used to determine the default catalog if it is not present + * in the database name. + * @return an array of two elements, the first being the catalog name, the second the database + * name. + * @throws MetaException if the name is neither just a database name nor a catalog plus + * database name with the proper delimiters. + */ + public static String[] parseDbName(String dbName, Configuration conf) throws MetaException { + if (dbName == null) return nullCatalogAndDatabase; + if (hasCatalogName(dbName)) { + if (dbName.endsWith(CATALOG_DB_SEPARATOR)) { + // This means the DB name is null + return new String[] {dbName.substring(1, dbName.length() - 1), null}; + } else if (dbName.endsWith(DB_EMPTY_MARKER)) { + // This means the DB name is empty + return new String[] {dbName.substring(1, dbName.length() - DB_EMPTY_MARKER.length() - 1), ""}; + } + String[] names = dbName.substring(1).split(CATALOG_DB_SEPARATOR, 2); + if (names.length != 2) { + throw new MetaException(dbName + " is prepended with the catalog marker but does not " + + "appear to have a catalog name in it"); + } + return names; + } else { + return new String[] {getDefaultCatalog(conf), dbName}; + } + } + + /** + * Position in the array returned by {@link #parseDbName} that has the catalog name. + */ + public static final int CAT_NAME = 0; + /** + * Position in the array returned by {@link #parseDbName} that has the database name.
+ */ + public static final int DB_NAME = 1; + + public static String getDefaultCatalog(Configuration conf) { + String catName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT); + if (catName == null || "".equals(catName)) catName = Warehouse.DEFAULT_CATALOG_NAME; + return catName; + } } diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 7612509377..8d5ae5d49f 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -31,9 +31,15 @@ + + + + - + + + @@ -61,6 +67,22 @@ + + + + + + + + + + + + + + + + @@ -191,6 +213,9 @@ + + + @@ -827,6 +852,9 @@ + + + @@ -874,6 +902,9 @@ + + + @@ -938,6 +969,9 @@ + + + @@ -1092,6 +1126,9 @@ + + + diff --git standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql index de9688d111..0003048f79 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql @@ -15,8 +15,15 @@ CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000 -- ---------------------------------------------- -- DDL Statements for tables -- ---------------------------------------------- - -CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); +CREATE TABLE "APP"."DBS" ( + "DB_ID" BIGINT NOT NULL, + "DESC" VARCHAR(4000), + "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, + "NAME" VARCHAR(128), + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + "CTLG_NAME" VARCHAR(256) NOT NULL +); CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); @@ -54,7 +61,15 @@ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256)); +CREATE TABLE "APP"."PARTITION_EVENTS" ( + "PART_NAME_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_TIME" BIGINT NOT NULL, + "EVENT_TYPE" INTEGER NOT NULL, + "PARTITION_NAME" VARCHAR(767), + "TBL_NAME" VARCHAR(256) +); CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); @@ -70,7 +85,29 @@ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); -CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, 
"NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "BIT_VECTOR" BLOB); +CREATE TABLE "APP"."TAB_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "BIT_VECTOR" BLOB +); CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); @@ -96,7 +133,30 @@ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); -CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "BIT_VECTOR" BLOB, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL); +CREATE TABLE "APP"."PART_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "PARTITION_NAME" VARCHAR(767) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "BIT_VECTOR" BLOB, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "PART_ID" BIGINT NOT NULL +); CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); @@ -104,7 +164,17 @@ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000 CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); -CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16)); +CREATE TABLE "APP"."NOTIFICATION_LOG" ( + "NL_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "MESSAGE" CLOB, + "TBL_NAME" VARCHAR(256), + "MESSAGE_FORMAT" VARCHAR(16) +); CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" 
BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); @@ -124,6 +194,7 @@ CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NU CREATE TABLE "APP"."MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256) NOT NULL, "DB_NAME" VARCHAR(128) NOT NULL, "TBL_NAME" VARCHAR(256) NOT NULL, "TXN_LIST" CLOB @@ -134,6 +205,12 @@ CREATE TABLE "APP"."MV_TABLES_USED" ( "TBL_ID" BIGINT NOT NULL ); +CREATE TABLE "APP"."CTLGS" ( + "CTLG_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL); + -- ---------------------------------------------- -- DML Statements -- ---------------------------------------------- @@ -150,7 +227,7 @@ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); -CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); @@ -160,7 +237,7 @@ CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCI CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); -CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME"); +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME"); CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); @@ -192,6 +269,9 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "E CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); +CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME"); + + -- ---------------------------------------------- -- DDL Statements for keys -- ---------------------------------------------- @@ -289,6 +369,9 @@ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRI ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); +ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID"); + + -- foreign ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; @@ -322,6 +405,8 @@ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFEREN ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; @@ -394,6 +479,8 @@ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN K ALTER TABLE 
"APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
-- ----------------------------------------------
-- DDL Statements for checks
-- ----------------------------------------------
diff --git standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index 8925fa827f..6aa2e82597 100644
--- standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -81,6 +81,7 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SC
 -- create mv_creation_metadata table
 CREATE TABLE "APP"."MV_CREATION_METADATA" (
   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+  "CAT_NAME" VARCHAR(256) NOT NULL,
   "DB_NAME" VARCHAR(128) NOT NULL,
   "TBL_NAME" VARCHAR(256) NOT NULL,
   "TXN_LIST" CLOB
@@ -160,5 +161,62 @@ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400);
 ALTER TABLE "APP"."HIVE_LOCKS" ALTER COLUMN "HL_TXNID" NOT NULL;
+-- Create new Catalog table
+-- HIVE-18755, add catalogs
+-- new catalogs table
+CREATE TABLE "APP"."CTLGS" (
+  "CTLG_ID" BIGINT NOT NULL,
+  "NAME" VARCHAR(256) UNIQUE,
+  "DESC" VARCHAR(4000),
+  "LOCATION_URI" VARCHAR(4000) NOT NULL);
+
+ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
+
+-- Insert a default value. The location is TBD. Hive will fix this when it starts.
+INSERT INTO "APP"."CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+DROP INDEX "APP"."UNIQUE_DATABASE";
+
+-- Add the new column to the DBS table; the NOT NULL constraint cannot be added until existing rows are backfilled
+ALTER TABLE "APP"."DBS" ADD COLUMN "CTLG_NAME" VARCHAR(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE "APP"."DBS"
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."DBS" ALTER COLUMN "CTLG_NAME" NOT NULL;
+
+-- Put back the unique index
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+
+-- Add the foreign key
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- Add columns to table stats and part stats
+ALTER TABLE "APP"."TAB_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Set the catalog name on existing rows to 'hive'
+UPDATE "APP"."TAB_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+UPDATE "APP"."PART_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+
+-- Rebuild the index for Part col stats. No such index for table stats, which seems weird
+DROP INDEX "APP"."PCS_STATS_IDX";
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+-- Add column to partition events
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Add column to notification log
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
-- This needs to be the last thing done.
Insert any changes above this line. UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; diff --git standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql index 68237ec1fa..77afd60f96 100644 --- standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql @@ -93,12 +93,13 @@ CREATE TABLE PART_COL_STATS NUM_TRUES bigint NULL, PART_ID bigint NULL, PARTITION_NAME nvarchar(767) NOT NULL, - "TABLE_NAME" nvarchar(256) NOT NULL + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL ); ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] CREATE TABLE PART_PRIVS @@ -236,7 +237,8 @@ CREATE TABLE TAB_COL_STATS NUM_NULLS bigint NOT NULL, NUM_TRUES bigint NULL, TBL_ID bigint NULL, - "TABLE_NAME" nvarchar(256) NOT NULL + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL ); ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); @@ -276,7 +278,8 @@ CREATE TABLE DBS DB_LOCATION_URI nvarchar(4000) NOT NULL, "NAME" nvarchar(128) NULL, OWNER_NAME nvarchar(128) NULL, - OWNER_TYPE nvarchar(10) NULL + OWNER_TYPE nvarchar(10) NULL, + CTLG_NAME nvarchar(256) ); ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); @@ -374,6 +377,7 @@ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NOT NULL, DB_NAME nvarchar(128) NOT NULL, TBL_NAME nvarchar(256) NOT NULL, TXN_LIST text NULL @@ -382,6 +386,7 @@ CREATE TABLE MV_CREATION_METADATA ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME); + CREATE TABLE MV_TABLES_USED ( MV_CREATION_METADATA_ID bigint NOT NULL, @@ -411,6 +416,7 @@ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); CREATE TABLE PARTITION_EVENTS ( PART_NAME_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NULL, DB_NAME nvarchar(128) NULL, EVENT_TIME bigint NOT NULL, EVENT_TYPE int NOT NULL, @@ -604,6 +610,7 @@ CREATE TABLE NOTIFICATION_LOG EVENT_ID bigint NOT NULL, EVENT_TIME int NOT NULL, EVENT_TYPE nvarchar(32) NOT NULL, + CAT_NAME nvarchar(128) NULL, DB_NAME nvarchar(128) NULL, TBL_NAME nvarchar(256) NULL, MESSAGE_FORMAT nvarchar(16), @@ -677,6 +684,15 @@ CREATE TABLE WM_MAPPING ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); +CREATE TABLE CTLGS ( + CTLG_ID bigint primary key, + "NAME" nvarchar(256), + "DESC" nvarchar(4000), + LOCATION_URI nvarchar(4000) not null +); + +CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME"); + -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] @@ -770,7 +786,7 @@ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_T -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] -CREATE UNIQUE INDEX 
UNIQUEDATABASE ON DBS ("NAME");
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME");
 -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
@@ -958,6 +974,7 @@ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_
 ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME");
 -- -----------------------------------------------------------------------------------------------------------------------------------------------
 -- Transaction and Lock Tables
 -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
index c5041b304f..b7803297ea 100644
--- standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
+++ standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
@@ -140,6 +140,7 @@ CREATE TABLE "SCHEMA_VERSION" (
 CREATE TABLE MV_CREATION_METADATA
 (
     MV_CREATION_METADATA_ID bigint NOT NULL,
+    CAT_NAME nvarchar(256) NOT NULL,
     DB_NAME nvarchar(128) NOT NULL,
     TBL_NAME nvarchar(256) NOT NULL,
     TXN_LIST text NULL
@@ -212,6 +213,64 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint;
 ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TXNID bigint NOT NULL;
+-- HIVE-18755, add catalogs
+-- new catalog table
+CREATE TABLE CTLGS (
+      CTLG_ID bigint primary key,
+      "NAME" nvarchar(256),
+      "DESC" nvarchar(4000),
+      LOCATION_URI nvarchar(4000) not null
+);
+
+-- Create unique index on CTLGS.NAME
+CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");
+
+-- Insert a default value. The location is TBD. Hive will fix this when it starts.
+INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+DROP INDEX UNIQUEDATABASE ON DBS;
+
+-- Add the new column to the DBS table; the NOT NULL constraint cannot be added until existing rows are backfilled
+ALTER TABLE DBS ADD CTLG_NAME nvarchar(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE DBS
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE DBS ALTER COLUMN CTLG_NAME nvarchar(256) NOT NULL;
+
+-- Put back the unique index
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME");
+
+-- Add the foreign key
+ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME");
+
+-- Add columns to table stats and part stats
+ALTER TABLE TAB_COL_STATS ADD CAT_NAME nvarchar(256);
+ALTER TABLE PART_COL_STATS ADD CAT_NAME nvarchar(256);
+
+-- Set the catalog name on existing rows to 'hive'
+UPDATE TAB_COL_STATS
+  SET CAT_NAME = 'hive';
+UPDATE PART_COL_STATS
+  SET CAT_NAME = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE TAB_COL_STATS ALTER COLUMN CAT_NAME nvarchar(256) NOT NULL;
+ALTER TABLE PART_COL_STATS ALTER COLUMN CAT_NAME nvarchar(256) NOT NULL;
+
+-- Rebuild the index for Part col stats.
No such index for table stats, which seems weird +DROP INDEX PCS_STATS_IDX ON PART_COL_STATS; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME, PARTITION_NAME); + +-- Add columns to partition events +ALTER TABLE PARTITION_EVENTS ADD CAT_NAME nvarchar(256); + +-- Add columns to notification log +ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME nvarchar(256); + -- These lines need to be last. Insert any changes above. UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; diff --git standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql index 3e2db2ab00..adbe129beb 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql @@ -77,6 +77,15 @@ CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + -- -- Table structure for table `DBS` -- @@ -90,8 +99,10 @@ CREATE TABLE IF NOT EXISTS `DBS` ( `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CTLG_NAME` varchar(256) NOT NULL, PRIMARY KEY (`DB_ID`), - UNIQUE KEY `UNIQUE_DATABASE` (`NAME`) + UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`), + CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; @@ -228,6 +239,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` ( /*!40101 SET character_set_client = utf8 */; CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` ( `PART_NAME_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `EVENT_TIME` bigint(20) NOT NULL, `EVENT_TYPE` int(11) NOT NULL, @@ -581,6 +593,7 @@ CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( /*!40101 SET character_set_client = utf8 */; CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TXN_LIST` TEXT DEFAULT NULL, @@ -684,6 +697,7 @@ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( -- CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, @@ -712,6 +726,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( -- CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` 
varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, @@ -736,7 +751,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; -- -- Table structure for table `TYPES` @@ -833,6 +848,7 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG` `EVENT_ID` BIGINT(20) NOT NULL, `EVENT_TIME` INT(11) NOT NULL, `EVENT_TYPE` varchar(32) NOT NULL, + `CAT_NAME` varchar(256), `DB_NAME` varchar(128), `TBL_NAME` varchar(256), `MESSAGE` longtext, diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql index 5a483abbb8..20f7c8d9d9 100644 --- standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql @@ -129,6 +129,7 @@ CREATE TABLE `SCHEMA_VERSION` ( -- 048-HIVE-14498 CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TXN_LIST` TEXT DEFAULT NULL, @@ -202,6 +203,62 @@ ALTER TABLE `KEY_CONSTRAINTS` ADD COLUMN `DEFAULT_VALUE` VARCHAR(400); ALTER TABLE `HIVE_LOCKS` CHANGE COLUMN `HL_TXNID` `HL_TXNID` bigint NOT NULL; +-- HIVE-18755, add catalogs +-- new catalogs table +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts.
+INSERT INTO `CTLGS` VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+ALTER TABLE `DBS` DROP KEY `UNIQUE_DATABASE`;
+
+-- Add the new column to the DBS table; the NOT NULL constraint cannot be added until existing rows are backfilled
+ALTER TABLE `DBS` ADD COLUMN `CTLG_NAME` VARCHAR(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE `DBS`
+  SET `CTLG_NAME` = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE `DBS` CHANGE COLUMN `CTLG_NAME` `CTLG_NAME` varchar(256) NOT NULL;
+
+-- Put back the unique index
+ALTER TABLE `DBS` ADD UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`);
+
+-- Add the foreign key
+ALTER TABLE `DBS` ADD CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`);
+
+-- Add columns to table stats and part stats
+ALTER TABLE `TAB_COL_STATS` ADD COLUMN `CAT_NAME` varchar(256);
+ALTER TABLE `PART_COL_STATS` ADD COLUMN `CAT_NAME` varchar(256);
+
+-- Set the catalog name on existing rows to 'hive'
+UPDATE `TAB_COL_STATS`
+  SET `CAT_NAME` = 'hive';
+UPDATE `PART_COL_STATS`
+  SET `CAT_NAME` = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE `TAB_COL_STATS` CHANGE COLUMN `CAT_NAME` `CAT_NAME` varchar(256) NOT NULL;
+ALTER TABLE `PART_COL_STATS` CHANGE COLUMN `CAT_NAME` `CAT_NAME` varchar(256) NOT NULL;
+
+-- Rebuild the index for Part col stats. No such index for table stats, which seems weird
+DROP INDEX `PCS_STATS_IDX` ON `PART_COL_STATS`;
+CREATE INDEX `PCS_STATS_IDX` ON `PART_COL_STATS` (`CAT_NAME`, `DB_NAME`, `TABLE_NAME`, `COLUMN_NAME`, `PARTITION_NAME`);
+
+-- Add column to partition events
+ALTER TABLE `PARTITION_EVENTS` ADD COLUMN `CAT_NAME` varchar(256);
+
+-- Add column to notification log
+ALTER TABLE `NOTIFICATION_LOG` ADD COLUMN `CAT_NAME` varchar(256);
+
 -- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; diff --git standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql index 09c40ada49..755a8a808d 100644 --- standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql @@ -72,6 +72,14 @@ CREATE TABLE PARTITION_KEY_VALS ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); +CREATE TABLE CTLGS ( + CTLG_ID NUMBER PRIMARY KEY, + "NAME" VARCHAR2(256), + "DESC" VARCHAR2(4000), + LOCATION_URI VARCHAR2(4000) NOT NULL, + UNIQUE ("NAME") +); + -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] CREATE TABLE DBS ( @@ -80,7 +88,8 @@ CREATE TABLE DBS DB_LOCATION_URI VARCHAR2(4000) NOT NULL, "NAME" VARCHAR2(128) NULL, OWNER_NAME VARCHAR2(128) NULL, - OWNER_TYPE VARCHAR2(10) NULL + OWNER_TYPE VARCHAR2(10) NULL, + CTLG_NAME VARCHAR2(256) ); ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); @@ -389,6 +398,7 @@ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TBL_NAME VARCHAR2(256) NOT NULL, TXN_LIST CLOB NULL @@ -409,6 +419,7 @@ CREATE TABLE MV_TABLES_USED CREATE TABLE PARTITION_EVENTS ( PART_NAME_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NULL, DB_NAME VARCHAR2(128) NULL, EVENT_TIME NUMBER NOT NULL, EVENT_TYPE NUMBER (10) NOT NULL, @@ -486,10 +497,13 @@ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_L ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; +ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED; + -- column statistics CREATE TABLE TAB_COL_STATS ( CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TABLE_NAME VARCHAR2(256) NOT NULL, COLUMN_NAME VARCHAR2(767) NOT NULL, @@ -526,6 +540,7 @@ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID); CREATE TABLE PART_COL_STATS ( CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TABLE_NAME VARCHAR2(256) NOT NULL, PARTITION_NAME VARCHAR2(767) NOT NULL, @@ -554,7 +569,7 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); CREATE TABLE FUNCS ( FUNC_ID NUMBER NOT NULL, @@ -584,6 +599,7 @@ CREATE TABLE NOTIFICATION_LOG EVENT_ID NUMBER NOT NULL, EVENT_TIME NUMBER(10) NOT NULL, EVENT_TYPE VARCHAR2(32) NOT NULL, + CAT_NAME VARCHAR2(256), DB_NAME VARCHAR2(128), TBL_NAME VARCHAR2(256), MESSAGE CLOB NULL, @@ -678,7 +694,7 @@ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] -CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME"); +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME); -- Constraints for table PARTITION_PARAMS diff --git 
standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index f95819ef09..cd94c017e1 100644
--- standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -142,6 +142,7 @@ CREATE TABLE "SCHEMA_VERSION" (
 CREATE TABLE MV_CREATION_METADATA
 (
     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+    CAT_NAME VARCHAR2(256) NOT NULL,
     DB_NAME VARCHAR2(128) NOT NULL,
     TBL_NAME VARCHAR2(256) NOT NULL,
     TXN_LIST CLOB NULL
@@ -188,7 +189,6 @@ UPDATE DBS
   SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4)
 WHERE DB_LOCATION_URI LIKE 's3n://%' ;
-
 -- HIVE-18192
 CREATE TABLE TXN_TO_WRITE_ID (
   T2W_TXNID number(19) NOT NULL,
@@ -222,6 +222,62 @@ ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
 ALTER TABLE HIVE_LOCKS MODIFY(HL_TXNID NOT NULL);
+-- HIVE-18755, add catalogs
+-- new catalogs table
+CREATE TABLE CTLGS (
+    CTLG_ID NUMBER PRIMARY KEY,
+    "NAME" VARCHAR2(256),
+    "DESC" VARCHAR2(4000),
+    LOCATION_URI VARCHAR2(4000) NOT NULL,
+    UNIQUE ("NAME")
+);
+
+-- Insert a default value. The location is TBD. Hive will fix this when it starts.
+INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+DROP INDEX UNIQUE_DATABASE;
+
+-- Add the new column to the DBS table; the NOT NULL constraint cannot be added until existing rows are backfilled
+ALTER TABLE DBS ADD CTLG_NAME VARCHAR2(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE DBS
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE DBS MODIFY CTLG_NAME NOT NULL;
+
+-- Put back the unique index
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+
+-- Add the foreign key
+ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+
+-- Add columns to table stats and part stats
+ALTER TABLE TAB_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ALTER TABLE PART_COL_STATS ADD CAT_NAME VARCHAR2(256);
+
+-- Set the catalog name on existing rows to 'hive'
+UPDATE TAB_COL_STATS
+  SET CAT_NAME = 'hive';
+UPDATE PART_COL_STATS
+  SET CAT_NAME = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE TAB_COL_STATS MODIFY CAT_NAME NOT NULL;
+ALTER TABLE PART_COL_STATS MODIFY CAT_NAME NOT NULL;
+
+-- Rebuild the index for Part col stats. No such index for table stats, which seems weird
+DROP INDEX PCS_STATS_IDX;
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+-- Add column to partition events
+ALTER TABLE PARTITION_EVENTS ADD CAT_NAME VARCHAR2(256);
+
+-- Add column to notification log
+ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME VARCHAR2(256);
+
 -- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual; diff --git standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql index 69317b0e09..72e5966cde 100644 --- standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql @@ -59,6 +59,13 @@ CREATE TABLE "DATABASE_PARAMS" ( ); +CREATE TABLE "CTLGS" ( + "CTLG_ID" BIGINT PRIMARY KEY, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL +); + -- -- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: -- @@ -69,7 +76,8 @@ CREATE TABLE "DBS" ( "DB_LOCATION_URI" character varying(4000) NOT NULL, "NAME" character varying(128) DEFAULT NULL::character varying, "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, - "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "CTLG_NAME" varchar(256) ); @@ -168,6 +176,7 @@ CREATE TABLE "PARTITIONS" ( CREATE TABLE "PARTITION_EVENTS" ( "PART_NAME_ID" bigint NOT NULL, + "CAT_NAME" character varying(256), "DB_NAME" character varying(128), "EVENT_TIME" bigint NOT NULL, "EVENT_TYPE" integer NOT NULL, @@ -386,6 +395,7 @@ CREATE TABLE "TBLS" ( CREATE TABLE "MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) NOT NULL, "DB_NAME" character varying(128) NOT NULL, "TBL_NAME" character varying(256) NOT NULL, "TXN_LIST" text @@ -508,6 +518,7 @@ CREATE TABLE "DELEGATION_TOKENS" CREATE TABLE "TAB_COL_STATS" ( "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, "DB_NAME" character varying(128) DEFAULT NULL::character varying, "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, @@ -544,6 +555,7 @@ CREATE TABLE "VERSION" ( CREATE TABLE "PART_COL_STATS" ( "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, "DB_NAME" character varying(128) DEFAULT NULL::character varying, "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, @@ -598,6 +610,7 @@ CREATE TABLE "NOTIFICATION_LOG" "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, + "CAT_NAME" VARCHAR(256), "DB_NAME" VARCHAR(128), "TBL_NAME" VARCHAR(256), "MESSAGE" text, @@ -1182,7 +1195,7 @@ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: -- -CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); -- @@ -1556,6 +1569,7 @@ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY -- ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; +ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME"); ALTER TABLE ONLY "VERSION" ADD 
CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
index f7d8c73c39..7b4bd68eec 100644
--- standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -155,6 +155,7 @@ CREATE TABLE "SCHEMA_VERSION" (
 -- 047-HIVE-14498
 CREATE TABLE "MV_CREATION_METADATA" (
     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+    "CAT_NAME" character varying(256) NOT NULL,
     "DB_NAME" character varying(128) NOT NULL,
     "TBL_NAME" character varying(256) NOT NULL,
     "TXN_LIST" text
@@ -237,6 +238,61 @@ ALTER TABLE "KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400);
 ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TXNID SET NOT NULL;
+-- HIVE-18755, add catalogs
+-- new catalogs table
+CREATE TABLE "CTLGS" (
+    "CTLG_ID" BIGINT PRIMARY KEY,
+    "NAME" VARCHAR(256) UNIQUE,
+    "DESC" VARCHAR(4000),
+    "LOCATION_URI" VARCHAR(4000) NOT NULL
+);
+
+-- Insert a default value. The location is TBD. Hive will fix this when it starts.
+INSERT INTO "CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+ALTER TABLE "DBS" DROP CONSTRAINT "UNIQUE_DATABASE";
+
+-- Add the new column to the DBS table; the NOT NULL constraint cannot be added until existing rows are backfilled
+ALTER TABLE "DBS" ADD "CTLG_NAME" VARCHAR(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE "DBS"
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "DBS" ALTER COLUMN "CTLG_NAME" SET NOT NULL;
+
+-- Put back the unique index
+ALTER TABLE "DBS" ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME");
+
+-- Add the foreign key
+ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME");
+
+-- Add columns to table stats and part stats
+ALTER TABLE "TAB_COL_STATS" ADD "CAT_NAME" varchar(256);
+ALTER TABLE "PART_COL_STATS" ADD "CAT_NAME" varchar(256);
+
+-- Set the catalog name on existing rows to 'hive'
+UPDATE "TAB_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+UPDATE "PART_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "CAT_NAME" SET NOT NULL;
+ALTER TABLE "PART_COL_STATS" ALTER COLUMN "CAT_NAME" SET NOT NULL;
+
+-- Rebuild the index for Part col stats. No such index for table stats, which seems weird
+DROP INDEX "PCS_STATS_IDX";
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME", "PARTITION_NAME");
+
+-- Add column to partition events
+ALTER TABLE "PARTITION_EVENTS" ADD "CAT_NAME" varchar(256);
+
+-- Add column to notification log
+ALTER TABLE "NOTIFICATION_LOG" ADD "CAT_NAME" varchar(256);
+
 -- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0'; diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index fb334c0c08..bff13e240a 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -48,8 +48,9 @@ struct SQLPrimaryKey { 4: i32 key_seq, // sequence number within primary key 5: string pk_name, // primary key name 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 7: bool validate_cstr, // Validate/No validate + 8: bool rely_cstr, // Rely/No Rely + 9: optional string catName } struct SQLForeignKey { @@ -66,50 +67,55 @@ struct SQLForeignKey { 11: string pk_name, // primary key name 12: bool enable_cstr, // Enable/Disable 13: bool validate_cstr, // Validate/No validate - 14: bool rely_cstr // Rely/No Rely + 14: bool rely_cstr, // Rely/No Rely + 15: optional string catName } struct SQLUniqueConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: i32 key_seq, // sequence number within unique constraint - 5: string uk_name, // unique key name - 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 1: string catName, // table catalog + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: i32 key_seq, // sequence number within unique constraint + 6: string uk_name, // unique key name + 7: bool enable_cstr, // Enable/Disable + 8: bool validate_cstr, // Validate/No validate + 9: bool rely_cstr, // Rely/No Rely } struct SQLNotNullConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: string nn_name, // not null name - 5: bool enable_cstr, // Enable/Disable - 6: bool validate_cstr, // Validate/No validate - 7: bool rely_cstr // Rely/No Rely + 1: string catName, // table catalog + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: string nn_name, // not null name + 6: bool enable_cstr, // Enable/Disable + 7: bool validate_cstr, // Validate/No validate + 8: bool rely_cstr, // Rely/No Rely } struct SQLDefaultConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: string default_value,// default value - 5: string dc_name, // default name - 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 1: string catName, // catalog name + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: string default_value,// default value + 6: string dc_name, // default name + 7: bool enable_cstr, // Enable/Disable + 8: bool validate_cstr, // Validate/No validate + 9: bool rely_cstr // Rely/No Rely } struct SQLCheckConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: string check_expression,// check expression - 5: string dc_name, // default name - 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // 
Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 1: string catName, // catalog name + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: string check_expression,// check expression + 6: string dc_name, // default name + 7: bool enable_cstr, // Enable/Disable + 8: bool validate_cstr, // Validate/No validate + 9: bool rely_cstr // Rely/No Rely } struct Type { @@ -233,6 +239,7 @@ struct HiveObjectRef{ 3: string objectName, 4: list partValues, 5: string columnName, + 6: optional string catName } struct PrivilegeGrantInfo { @@ -318,6 +325,34 @@ struct GrantRevokeRoleResponse { 1: optional bool success; } +struct Catalog { + 1: string name, // Name of the catalog + 2: optional string description, // description of the catalog + 3: string locationUri // default storage location. When databases are created in + // this catalog, if they do not specify a location, they will + // be placed in this location. +} + +struct CreateCatalogRequest { + 1: Catalog catalog +} + +struct GetCatalogRequest { + 1: string name +} + +struct GetCatalogResponse { + 1: Catalog catalog +} + +struct GetCatalogsResponse { + 1: list names +} + +struct DropCatalogRequest { + 1: string name +} + // namespace for tables struct Database { 1: string name, @@ -326,7 +361,8 @@ struct Database { 4: map parameters, // properties associated with the database 5: optional PrincipalPrivilegeSet privileges, 6: optional string ownerName, - 7: optional PrincipalType ownerType + 7: optional PrincipalType ownerType, + 8: optional string catalogName } // This object holds the information needed by SerDes @@ -386,7 +422,8 @@ struct Table { 13: optional PrincipalPrivilegeSet privileges, 14: optional bool temporary=false, 15: optional bool rewriteEnabled, // rewrite enabled or not - 16: optional CreationMetadata creationMetadata // only for MVs, it stores table names used and txn list at MV creation + 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation + 17: optional string catName // Name of the catalog the table is in } struct Partition { @@ -397,7 +434,8 @@ struct Partition { 5: i32 lastAccessTime, 6: StorageDescriptor sd, 7: map parameters, - 8: optional PrincipalPrivilegeSet privileges + 8: optional PrincipalPrivilegeSet privileges, + 9: optional string catName } struct PartitionWithoutSD { @@ -423,7 +461,8 @@ struct PartitionSpec { 2: string tableName, 3: string rootPath, 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec, - 5: optional PartitionListComposingSpec partitionList + 5: optional PartitionListComposingSpec partitionList, + 6: optional string catName } // column statistics @@ -512,7 +551,8 @@ struct ColumnStatisticsDesc { 2: required string dbName, 3: required string tableName, 4: optional string partName, -5: optional i64 lastAnalyzed +5: optional i64 lastAnalyzed, +6: optional string catName } struct ColumnStatistics { @@ -547,7 +587,8 @@ struct EnvironmentContext { struct PrimaryKeysRequest { 1: required string db_name, - 2: required string tbl_name + 2: required string tbl_name, + 3: optional string catName } struct PrimaryKeysResponse { @@ -559,6 +600,7 @@ struct ForeignKeysRequest { 2: string parent_tbl_name, 3: string foreign_db_name, 4: string foreign_tbl_name + 5: optional string catName // No cross catalog constraints } struct ForeignKeysResponse { @@ -566,8 +608,9 @@ struct ForeignKeysResponse { } struct UniqueConstraintsRequest { - 1: required string db_name, - 2: 
required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name, } struct UniqueConstraintsResponse { @@ -575,8 +618,9 @@ struct UniqueConstraintsResponse { } struct NotNullConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name, } struct NotNullConstraintsResponse { @@ -584,8 +628,9 @@ struct NotNullConstraintsResponse { } struct DefaultConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name } struct DefaultConstraintsResponse { @@ -593,8 +638,9 @@ struct DefaultConstraintsResponse { } struct CheckConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name } struct CheckConstraintsResponse { @@ -605,7 +651,8 @@ struct CheckConstraintsResponse { struct DropConstraintRequest { 1: required string dbname, 2: required string tablename, - 3: required string constraintname + 3: required string constraintname, + 4: optional string catName } struct AddPrimaryKeyRequest { @@ -645,6 +692,7 @@ struct PartitionsByExprRequest { 3: required binary expr, 4: optional string defaultPartitionName, 5: optional i16 maxParts=-1 + 6: optional string catName } struct TableStatsResult { @@ -659,13 +707,15 @@ struct TableStatsRequest { 1: required string dbName, 2: required string tblName, 3: required list colNames + 4: optional string catName } struct PartitionsStatsRequest { 1: required string dbName, 2: required string tblName, 3: required list colNames, - 4: required list partNames + 4: required list partNames, + 5: optional string catName } // Return type for add_partitions_req @@ -679,7 +729,8 @@ struct AddPartitionsRequest { 2: required string tblName, 3: required list parts, 4: required bool ifNotExists, - 5: optional bool needResult=true + 5: optional bool needResult=true, + 6: optional string catName } // Return type for drop_partitions_req @@ -707,7 +758,8 @@ struct DropPartitionsRequest { 5: optional bool ifExists=true, // currently verified on client 6: optional bool ignoreProtection, 7: optional EnvironmentContext environmentContext, - 8: optional bool needResult=true + 8: optional bool needResult=true, + 9: optional string catName } struct PartitionValuesRequest { @@ -719,6 +771,7 @@ struct PartitionValuesRequest { 6: optional list partitionOrder; 7: optional bool ascending = true; 8: optional i64 maxParts = -1; + 9: optional string catName } struct PartitionValuesRow { @@ -754,6 +807,7 @@ struct Function { 6: i32 createTime, 7: FunctionType functionType, 8: list resourceUris, + 9: optional string catName } // Structs for transaction and locks @@ -977,10 +1031,11 @@ struct BasicTxnInfo { } struct CreationMetadata { - 1: required string dbName, - 2: required string tblName, - 3: required set tablesUsed, - 4: optional string validTxnList + 1: required string catName + 2: required string dbName, + 3: required string tblName, + 4: required set tablesUsed, + 5: optional string validTxnList, } struct NotificationEventRequest { @@ -996,6 +1051,7 @@ struct NotificationEvent { 5: optional string tableName, 6: required string message, 7: optional string messageFormat, + 8: optional string catName } struct NotificationEventResponse { @@ -1009,6 +1065,7 @@ struct CurrentNotificationEventId { struct 
NotificationEventsCountRequest { 1: required i64 fromEventId, 2: required string dbName, + 3: optional string catName } struct NotificationEventsCountResponse { @@ -1034,6 +1091,7 @@ struct FireEventRequest { 3: optional string dbName, 4: optional string tableName, 5: optional list partitionVals, + 6: optional string catName, } struct FireEventResponse { @@ -1125,7 +1183,8 @@ struct ClientCapabilities { struct GetTableRequest { 1: required string dbName, 2: required string tblName, - 3: optional ClientCapabilities capabilities + 3: optional ClientCapabilities capabilities, + 4: optional string catName } struct GetTableResult { @@ -1135,7 +1194,8 @@ struct GetTableResult { struct GetTablesRequest { 1: required string dbName, 2: optional list tblNames, - 3: optional ClientCapabilities capabilities + 3: optional ClientCapabilities capabilities, + 4: optional string catName } struct GetTablesResult { @@ -1157,6 +1217,7 @@ struct TableMeta { 2: required string tableName; 3: required string tableType; 4: optional string comments; + 5: optional string catName; } struct Materialization { @@ -1384,17 +1445,19 @@ struct WMCreateOrDropTriggerToPoolMappingResponse { struct ISchema { 1: SchemaType schemaType, 2: string name, - 3: string dbName, - 4: SchemaCompatibility compatibility, - 5: SchemaValidation validationLevel, - 6: bool canEvolve, - 7: optional string schemaGroup, - 8: optional string description + 3: string catName, + 4: string dbName, + 5: SchemaCompatibility compatibility, + 6: SchemaValidation validationLevel, + 7: bool canEvolve, + 8: optional string schemaGroup, + 9: optional string description } struct ISchemaName { - 1: string dbName, - 2: string schemaName + 1: string catName, + 2: string dbName, + 3: string schemaName } struct AlterISchemaRequest { @@ -1515,6 +1578,11 @@ service ThriftHiveMetastore extends fb303.FacebookService string getMetaConf(1:string key) throws(1:MetaException o1) void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1) + void create_catalog(1: CreateCatalogRequest catalog) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3: MetaException o3) + GetCatalogResponse get_catalog(1: GetCatalogRequest catName) throws (1:NoSuchObjectException o1, 2:MetaException o2) + GetCatalogsResponse get_catalogs() throws (1:MetaException o1) + void drop_catalog(1: DropCatalogRequest catName) throws (1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) @@ -1597,7 +1665,7 @@ service ThriftHiveMetastore extends fb303.FacebookService throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) map get_materialization_invalidation_info(1:string dbname, 2:list tbl_names) throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) - void update_creation_metadata(1:string dbname, 2:string tbl_name, 3:CreationMetadata creation_metadata) + void update_creation_metadata(1: string catName, 2:string dbname, 3:string tbl_name, 4:CreationMetadata creation_metadata) throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) // Get a list of table names that match a filter. 
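Taken together, the catalog structs above and the name-packing helpers shown earlier define how a caller addresses objects across catalogs. Below is a minimal round-trip sketch, not part of this patch: it assumes the helpers (prependCatalogToDbName, parseDbName, getDefaultCatalog, CAT_NAME, DB_NAME) are statically imported from the metastore utility class that declares them, whose file header lies outside this excerpt, and that the Thrift compiler generates the usual Java bean setters; the catalog name and location URI are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.CreateCatalogRequest;

public class CatalogRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // In practice this would be a MetastoreConf-created Configuration.
    Configuration conf = new Configuration();

    // Populate the new Thrift objects the way a client would before create_catalog().
    Catalog cat = new Catalog();                          // Thrift-generated bean (setters assumed)
    cat.setName("prod");                                  // catalog names are unique (CTLGS.NAME)
    cat.setDescription("Production catalog");
    cat.setLocationUri("hdfs://nn:8020/warehouse/prod");  // default location for its databases
    CreateCatalogRequest req = new CreateCatalogRequest();
    req.setCatalog(cat);

    // Pack a catalog and database into the single-string form used by the
    // older single-string APIs, then parse them back apart.
    String packed = prependCatalogToDbName("prod", "web_logs", conf);
    String[] parts = parseDbName(packed, conf);
    assert "prod".equals(parts[CAT_NAME]) && "web_logs".equals(parts[DB_NAME]);

    // A bare database name resolves to the configured default catalog
    // (MetastoreConf CATALOG_DEFAULT, falling back to Warehouse.DEFAULT_CATALOG_NAME).
    assert getDefaultCatalog(conf).equals(parseDbName("web_logs", conf)[CAT_NAME]);
  }
}

Most request structs above gain only an optional catName, so a pre-catalog client that sends a bare database name keeps working: the server fills in the default catalog exactly as getDefaultCatalog does here.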
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index acad6760ac..304f567533 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -152,36 +153,62 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + objectStore.createCatalog(cat); + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + objectStore.alterCatalog(catName, cat); + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + return objectStore.getCatalog(catalogName); + } + + @Override + public List getCatalogs() throws MetaException { + return objectStore.getCatalogs(); + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + objectStore.dropCatalog(catalogName); + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { objectStore.createDatabase(db); } @Override - public Database getDatabase(String dbName) throws NoSuchObjectException { - return objectStore.getDatabase(dbName); + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { + return objectStore.getDatabase(catName, dbName); } @Override - public boolean dropDatabase(String dbName) + public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { - return objectStore.dropDatabase(dbName); + return objectStore.dropDatabase(catName, dbName); } @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - return objectStore.alterDatabase(dbName, db); + return objectStore.alterDatabase(catName, dbName, db); } @Override - public List getDatabases(String pattern) throws MetaException { - return objectStore.getDatabases(pattern); + public List getDatabases(String catName, String pattern) throws MetaException { + return objectStore.getDatabases(catName, pattern); } @Override - public List getAllDatabases() throws MetaException { - return objectStore.getAllDatabases(); + public List getAllDatabases(String catName) throws MetaException { + return objectStore.getAllDatabases(catName); } @Override @@ -205,15 +232,15 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException } @Override - public boolean dropTable(String dbName, String tableName) + public boolean dropTable(String catName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - return objectStore.dropTable(dbName, tableName); + return objectStore.dropTable(catName, dbName, tableName); } @Override - public Table getTable(String dbName, String tableName) 
throws MetaException { - return objectStore.getTable(dbName, tableName); + public Table getTable(String catName, String dbName, String tableName) throws MetaException { + return objectStore.getTable(catName, dbName, tableName); } @Override @@ -223,150 +250,145 @@ public boolean addPartition(Partition part) } @Override - public Partition getPartition(String dbName, String tableName, List partVals) + public Partition getPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { - return objectStore.getPartition(dbName, tableName, partVals); + return objectStore.getPartition(catName, dbName, tableName, partVals); } @Override - public boolean dropPartition(String dbName, String tableName, List partVals) + public boolean dropPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - return objectStore.dropPartition(dbName, tableName, partVals); + return objectStore.dropPartition(catName, dbName, tableName, partVals); } @Override - public List getPartitions(String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException, NoSuchObjectException { - return objectStore.getPartitions(dbName, tableName, max); + return objectStore.getPartitions(catName, dbName, tableName, max); } @Override - public void alterTable(String dbName, String name, Table newTable) + public void alterTable(String catName, String dbName, String name, Table newTable) throws InvalidObjectException, MetaException { - objectStore.alterTable(dbName, name, newTable); + objectStore.alterTable(catName, dbName, name, newTable); } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { - objectStore.updateCreationMetadata(dbname, tablename, cm); + objectStore.updateCreationMetadata(catName, dbname, tablename, cm); } - @Override - public List getTables(String dbName, String pattern) throws MetaException { - return objectStore.getTables(dbName, pattern); + public List getTables(String catName, String dbName, String pattern) throws MetaException { + return objectStore.getTables(catName, dbName, pattern); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { - return objectStore.getTables(dbName, pattern, tableType); + public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { + return objectStore.getTables(catName, dbName, pattern, tableType); } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { - return objectStore.getMaterializedViewsForRewriting(dbName); + return objectStore.getMaterializedViewsForRewriting(catName, dbName); } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) throws MetaException { - return objectStore.getTableMeta(dbNames, tableNames, tableTypes); + return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } @Override - public List
<Table> getTableObjectsByName(String dbName, List tableNames) + public List<Table>
getTableObjectsByName(String catName, String dbName, List tableNames) throws MetaException, UnknownDBException { - return objectStore.getTableObjectsByName(dbName, tableNames); + return objectStore.getTableObjectsByName(catName, dbName, tableNames); } @Override - public List getAllTables(String dbName) throws MetaException { - return objectStore.getAllTables(dbName); + public List getAllTables(String catName, String dbName) throws MetaException { + return objectStore.getAllTables(catName, dbName); } @Override - public List listTableNamesByFilter(String dbName, String filter, + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) throws MetaException, UnknownDBException { - return objectStore.listTableNamesByFilter(dbName, filter, maxTables); + return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables); } @Override - public List listPartitionNames(String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) throws MetaException { - return objectStore.listPartitionNames(dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String db_name, + String tbl_name, List cols, boolean applyDistinct, String filter, + boolean ascending, List order, long maxParts) throws MetaException { return null; } @Override - public List listPartitionNamesByFilter(String dbName, String tblName, - String filter, short maxParts) throws MetaException { - return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts); - } - - @Override - public void alterPartition(String dbName, String tblName, List partVals, + public void alterPartition(String catName, String dbName, String tblName, List partVals, Partition newPart) throws InvalidObjectException, MetaException { - objectStore.alterPartition(dbName, tblName, partVals, newPart); + objectStore.alterPartition(catName, dbName, tblName, partVals, newPart); } @Override - public void alterPartitions(String dbName, String tblName, + public void alterPartitions(String catName, String dbName, String tblName, List> partValsList, List newParts) throws InvalidObjectException, MetaException { - objectStore.alterPartitions(dbName, tblName, partValsList, newParts); + objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts); + return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByFilter(dbName, tblName, filter); + return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, + 
public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByExpr(dbName, tblName, expr); + return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByNames(dbName, tblName, partNames); + return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { - return objectStore.getPartitionsByExpr( + return objectStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } @Override - public Table markPartitionForEvent(String dbName, String tblName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType); + return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); } @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, + public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType); + return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); } @Override @@ -402,31 +424,31 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getDBPrivilegeSet(dbName, userName, groupNames); + return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames); + return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition, + return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); } @Override - public PrincipalPrivilegeSet 
getColumnPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName, + return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); } @@ -438,38 +460,38 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { - return objectStore.listPrincipalDBGrants(principalName, principalType, dbName); + PrincipalType principalType, String catName, String dbName) { + return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { + PrincipalType principalType, String catName, String dbName, String tableName) { return objectStore.listAllTableGrants(principalName, principalType, - dbName, tableName); + catName, dbName, tableName); } @Override public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partValues, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { return objectStore.listPrincipalPartitionGrants(principalName, principalType, - dbName, tableName, partValues, partName); + catName, dbName, tableName, partValues, partName); } @Override public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String columnName) { + PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { return objectStore.listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); } @Override public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, String tableName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partVals, String partName, String columnName) { return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType, - dbName, tableName, partVals, partName, columnName); + catName, dbName, tableName, partVals, partName, columnName); } @Override @@ -511,33 +533,33 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName, + return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - return objectStore.getPartitionsWithAuth(dbName, tblName, 
maxParts, userName, + return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } @Override - public List listPartitionNamesPs(String dbName, String tblName, + public List listPartitionNamesPs(String catName, String dbName, String tblName, List partVals, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } @Override - public List listPartitionsPsWithAuth(String dbName, String tblName, + public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partVals, short maxParts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, + return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } @@ -582,53 +604,52 @@ public long cleanupEvents() { } @Override - public List listDBGrantsAll(String dbName) { - return objectStore.listDBGrantsAll(dbName); + public List listDBGrantsAll(String catName, String dbName) { + return objectStore.listDBGrantsAll(catName, dbName); } @Override - public List listPartitionColumnGrantsAll(String dbName, String tableName, + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { - return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName); + return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); } @Override - public List listTableGrantsAll(String dbName, String tableName) { - return objectStore.listTableGrantsAll(dbName, tableName); + public List listTableGrantsAll(String catName, String dbName, String tableName) { + return objectStore.listTableGrantsAll(catName, dbName, tableName); } @Override - public List listPartitionGrantsAll(String dbName, String tableName, + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { - return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName); + return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); } @Override - public List listTableColumnGrantsAll(String dbName, String tableName, + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { - return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName); + return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(dbName, tableName, colNames); + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) - throws NoSuchObjectException, MetaException, InvalidObjectException, - InvalidInputException { - return objectStore.deleteTableColumnStatistics(dbName, tableName, colName); + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) 
+ throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName); } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName, + return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName, partVals, colName); } @@ -702,33 +723,33 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames); + return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); } @Override - public boolean doesPartitionExist(String dbName, String tableName, + public boolean doesPartitionExist(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(dbName, tableName, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partVals); } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - return objectStore.addPartitions(dbName, tblName, parts); + return objectStore.addPartitions(catName, dbName, tblName, parts); } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { return false; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - objectStore.dropPartitions(dbName, tblName, partNames); + objectStore.dropPartitions(catName, dbName, tblName, partNames); } @Override @@ -738,38 +759,38 @@ public void createFunction(Function func) throws InvalidObjectException, } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { - objectStore.alterFunction(dbName, funcName, newFunction); + objectStore.alterFunction(catName, dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - objectStore.dropFunction(dbName, funcName); + objectStore.dropFunction(catName, dbName, funcName); } @Override - public Function getFunction(String 
dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { - return objectStore.getFunction(dbName, funcName); + return objectStore.getFunction(catName, dbName, funcName); } @Override - public List getAllFunctions() + public List getAllFunctions(String catName) throws MetaException { return Collections.emptyList(); } @Override - public List getFunctions(String dbName, String pattern) + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { - return objectStore.getFunctions(dbName, pattern); + return objectStore.getFunctions(catName, dbName, pattern); } @Override - public AggrStats get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; @@ -847,14 +868,14 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO Auto-generated method stub @@ -862,28 +883,28 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getCheckConstraints(String db_name, String tbl_name) + public List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; @@ -902,8 +923,8 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO Auto-generated method stub } @@ -959,7 +980,8 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int } @Override - public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException { + public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, + MetaException { return objectStore.getResourcePlan(name); } @@ -1062,6 +1084,14 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); } + + @Override + public List getPartitionColStatsForDatabase(String catName, String 
dbName) + throws MetaException, NoSuchObjectException { + // TODO Auto-generated method stub + return null; + } + public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException, NoSuchObjectException { objectStore.createISchema(schema); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 357ac2d5b6..773e4f8dec 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -140,38 +141,64 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + return null; + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + + } + + @Override + public List getCatalogs() throws MetaException { + return null; + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { } @Override - public Database getDatabase(String name) throws NoSuchObjectException { + public Database getDatabase(String catName, String name) throws NoSuchObjectException { return null; } @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException { return false; } @Override - public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, + public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException, MetaException { return false; } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { return Collections.emptyList(); } @@ -201,13 +228,13 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException } @Override - public boolean dropTable(String dbName, String tableName) throws MetaException { + public boolean dropTable(String catName, String dbName, String tableName) throws MetaException { return false; } @Override - public Table getTable(String dbName, String tableName) throws MetaException { + public Table getTable(String catName, String dbName, String tableName) throws MetaException { return null; } @@ -219,144 +246,141 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE } @Override - public Partition getPartition(String dbName, String tableName, List part_vals) + public Partition getPartition(String catName, String 
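A quick round trip (editorial sketch, not from the patch) through the new RawStore catalog methods stubbed above. It assumes the Thrift-generated Catalog bean exposes name/description/locationUri setters; "test_cat" and its location URI are made-up values.

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Catalog;

public class CatalogRoundTrip {
  public static void roundTrip(RawStore store) throws Exception {
    Catalog cat = new Catalog();
    cat.setName("test_cat");                     // Thrift bean setters (assumed)
    cat.setLocationUri("file:///tmp/test_cat");
    store.createCatalog(cat);                    // new in this patch
    Catalog fetched = store.getCatalog("test_cat");
    cat.setDescription("round-trip check");
    store.alterCatalog(fetched.getName(), cat);  // replace the catalog by name
    store.dropCatalog("test_cat");
  }
}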
dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException { return null; } @Override - public boolean dropPartition(String dbName, String tableName, List part_vals) + public boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException { return false; } @Override - public List getPartitions(String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException { return Collections.emptyList(); } @Override - public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, - MetaException { - - + public void alterTable(String catName, String dbname, String name, Table newTable) + throws InvalidObjectException, MetaException { } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { } - @Override - public List getTables(String dbName, String pattern) throws MetaException { - + public List getTables(String catName, String dbName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { + public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { return Collections.emptyList(); } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) throws MetaException { return Collections.emptyList(); } @Override - public List
<Table> getTableObjectsByName(String dbname, List tableNames) + public List<Table>
getTableObjectsByName(String catName, String dbname, List tableNames) throws MetaException, UnknownDBException { return Collections.emptyList(); } @Override - public List getAllTables(String dbName) throws MetaException { + public List getAllTables(String catName, String dbName) throws MetaException { return Collections.emptyList(); } @Override - public List listTableNamesByFilter(String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, short max_tables) throws MetaException, UnknownDBException { return Collections.emptyList(); } @Override - public List listPartitionNames(String db_name, String tbl_name, short max_parts) + public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException { return Collections.emptyList(); } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String db_name, + String tbl_name, List cols, + boolean applyDistinct, String filter, + boolean ascending, List order, + long maxParts) throws MetaException { return null; } @Override - public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, - short max_parts) throws MetaException { - - return Collections.emptyList(); - } - - @Override - public void alterPartition(String db_name, String tbl_name, List part_vals, + public void alterPartition(String catName, String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException { } @Override - public void alterPartitions(String db_name, String tbl_name, List> part_vals_list, - List new_parts) throws InvalidObjectException, MetaException { + public void alterPartitions(String catName, String db_name, String tbl_name, + List> part_vals_list, List new_parts) + throws InvalidObjectException, MetaException { + + } @Override - public List getPartitionsByFilter(String dbName, String tblName, String filter, - short maxParts) throws MetaException, NoSuchObjectException { + public List getPartitionsByFilter(String catName, String dbName, String tblName, + String filter, short maxParts) + throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { return false; } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { return -1; } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { return -1; } @Override - public Table markPartitionForEvent(String dbName, String tblName, 
Map partVals, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { @@ -364,7 +388,7 @@ public Table markPartitionForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { @@ -407,21 +431,21 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List g } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { return null; } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { return null; } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { @@ -429,7 +453,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tabl } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { @@ -445,21 +469,21 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { + PrincipalType principalType, String catName, String dbName) { return Collections.emptyList(); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { + PrincipalType principalType, String catName, String dbName, String tableName) { return Collections.emptyList(); } @Override public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partValues, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { return Collections.emptyList(); @@ -467,14 +491,14 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String columnName) { + PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { return Collections.emptyList(); } @Override public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partVals, + PrincipalType principalType, String catName, String dbName, String tableName, List partVals, String partName, String columnName) { return Collections.emptyList(); @@ -524,7 +548,7 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition 
getPartitionWithAuth(String dbName, String tblName, List partVals, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { @@ -532,7 +556,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List getPartitionsWithAuth(String dbName, String tblName, short maxParts, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { @@ -540,14 +564,14 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, + public List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, + public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { @@ -635,46 +659,46 @@ public boolean removeMasterKey(Integer keySeq) { } @Override - public List listDBGrantsAll(String dbName) { + public List listDBGrantsAll(String catName, String dbName) { return Collections.emptyList(); } @Override - public List listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) { + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { return Collections.emptyList(); } @Override - public List listTableGrantsAll(String dbName, String tableName) { + public List listTableGrantsAll(String catName, String dbName, String tableName) { return Collections.emptyList(); } @Override - public List listPartitionGrantsAll(String dbName, String tableName, String partitionName) { + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { return Collections.emptyList(); } @Override - public List listTableColumnGrantsAll(String dbName, String tableName, String columnName) { + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { return Collections.emptyList(); } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException { return null; } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException { return false; } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -708,31 +732,31 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met } @Override - public 
List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public boolean doesPartitionExist(String dbName, String tableName, + public boolean doesPartitionExist(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { return false; } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { return false; } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { return false; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) { + public void dropPartitions(String catName, String dbName, String tblName, List partNames) { } @Override @@ -741,36 +765,36 @@ public void createFunction(Function func) throws InvalidObjectException, } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { } @Override - public void dropFunction(String dbName, String funcName) + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { } @Override - public Function getFunction(String dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { return null; } @Override - public List getAllFunctions() + public List getAllFunctions(String catName) throws MetaException { return Collections.emptyList(); } @Override - public List getFunctions(String dbName, String pattern) + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public AggrStats get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; @@ -847,14 +871,14 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO Auto-generated method stub @@ -862,28 +886,28 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - 
public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getCheckConstraints(String db_name, String tbl_name) + public List getCheckConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; @@ -902,8 +926,8 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO Auto-generated method stub } @@ -1049,6 +1073,13 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { } + @Override + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + // TODO Auto-generated method stub + return null; + } + public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException { } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java new file mode 100644 index 0000000000..c1c39bf264 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -0,0 +1,3286 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.hooks.URIResolverHook; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TFramedTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; + +/** + * Hive Metastore Client. + * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient + * are not public and can change. Hence this is marked as unstable. + * For users who require retry mechanism when the connection between metastore and client is + * broken, RetryingMetaStoreClient class should be used. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoCloseable { + /** + * Capabilities of the current client. 
If this client talks to a MetaStore server in a manner + * implying the usage of some expanded features that require client-side support that this client + * doesn't have (e.g. a getting a table of a new type), it will get back failures when the + * capability checking is enabled (the default). + */ + public final static ClientCapabilities VERSION = new ClientCapabilities( + Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES)); + // Test capability for tests. + public final static ClientCapabilities TEST_VERSION = new ClientCapabilities( + Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY)); + + ThriftHiveMetastore.Iface client = null; + private TTransport transport = null; + private boolean isConnected = false; + private URI metastoreUris[]; + private final HiveMetaHookLoader hookLoader; + protected final Configuration conf; // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client. + protected boolean fastpath = false; + private String tokenStrForm; + private final boolean localMetaStore; + private final MetaStoreFilterHook filterHook; + private final URIResolverHook uriResolverHook; + private final int fileMetadataBatchSize; + + private Map currentMetaVars; + + private static final AtomicInteger connCount = new AtomicInteger(0); + + // for thrift connects + private int retries = 5; + private long retryDelaySeconds = 0; + private final ClientCapabilities version; + + static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientPreCatalog.class); + + public HiveMetaStoreClientPreCatalog(Configuration conf) throws MetaException { + this(conf, null, true); + } + + public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException { + this(conf, hookLoader, true); + } + + public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) + throws MetaException { + + this.hookLoader = hookLoader; + if (conf == null) { + conf = MetastoreConf.newMetastoreConf(); + this.conf = conf; + } else { + this.conf = new Configuration(conf); + } + version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION; + filterHook = loadFilterHooks(); + uriResolverHook = loadUriResolverHook(); + fileMetadataBatchSize = MetastoreConf.getIntVar( + conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); + + String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS); + localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri); + if (localMetaStore) { + if (!allowEmbedded) { + throw new MetaException("Embedded metastore is not allowed here. 
Please configure " + + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]"); + } + // instantiate the metastore server handler directly instead of connecting + // through the network + client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true); + // Initialize materializations invalidation cache (only for local metastore) + MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client); + isConnected = true; + snapshotActiveConf(); + return; + } + + // get the number retries + retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES); + retryDelaySeconds = MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); + + // user wants file store based configuration + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) { + resolveUris(); + } else { + LOG.error("NOT getting uris from conf"); + throw new MetaException("MetaStoreURIs not found in conf file"); + } + + //If HADOOP_PROXY_USER is set in env or property, + //then need to create metastore client that proxies as that user. + String HADOOP_PROXY_USER = "HADOOP_PROXY_USER"; + String proxyUser = System.getenv(HADOOP_PROXY_USER); + if (proxyUser == null) { + proxyUser = System.getProperty(HADOOP_PROXY_USER); + } + //if HADOOP_PROXY_USER is set, create DelegationToken using real user + if(proxyUser != null) { + LOG.info(HADOOP_PROXY_USER + " is set. Using delegation " + + "token for HiveMetaStore connection."); + try { + UserGroupInformation.getLoginUser().getRealUser().doAs( + new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + open(); + return null; + } + }); + String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer"; + String delegationTokenStr = getDelegationToken(proxyUser, proxyUser); + SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr, + delegationTokenPropString); + MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString); + close(); + } catch (Exception e) { + LOG.error("Error while setting delegation token for " + proxyUser, e); + if(e instanceof MetaException) { + throw (MetaException)e; + } else { + throw new MetaException(e.getMessage()); + } + } + } + // finally open the store + open(); + } + + private void resolveUris() throws MetaException { + String metastoreUrisString[] = MetastoreConf.getVar(conf, + ConfVars.THRIFT_URIS).split(","); + + List metastoreURIArray = new ArrayList(); + try { + int i = 0; + for (String s : metastoreUrisString) { + URI tmpUri = new URI(s); + if (tmpUri.getScheme() == null) { + throw new IllegalArgumentException("URI: " + s + + " does not have a scheme"); + } + if (uriResolverHook != null) { + metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri)); + } else { + metastoreURIArray.add(new URI( + tmpUri.getScheme(), + tmpUri.getUserInfo(), + HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()), + tmpUri.getPort(), + tmpUri.getPath(), + tmpUri.getQuery(), + tmpUri.getFragment() + )); + } + } + metastoreUris = new URI[metastoreURIArray.size()]; + for (int j = 0; j < metastoreURIArray.size(); j++) { + metastoreUris[j] = metastoreURIArray.get(j); + } + + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) { + List uriList = Arrays.asList(metastoreUris); + Collections.shuffle(uriList); + metastoreUris = (URI[]) uriList.toArray(); + } + } catch (IllegalArgumentException e) { + throw (e); + } catch (Exception e) { + 
MetaStoreUtils.logAndThrowMetaException(e); + } + } + + + private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { + Class authProviderClass = MetastoreConf. + getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; + try { + Constructor constructor = + authProviderClass.getConstructor(Configuration.class); + return constructor.newInstance(conf); + } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } + } + + //multiple clients may initialize the hook at the same time + synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException { + + String uriResolverClassName = + MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER); + if (uriResolverClassName.equals("")) { + return null; + } else { + LOG.info("Loading uri resolver" + uriResolverClassName); + try { + Class uriResolverClass = Class.forName(uriResolverClassName, true, + JavaUtils.getClassLoader()); + return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null); + } catch (Exception e) { + LOG.error("Exception loading uri resolver hook" + e); + return null; + } + } + } + + /** + * Swaps the first element of the metastoreUris array with a random element from the + * remainder of the array. + */ + private void promoteRandomMetaStoreURI() { + if (metastoreUris.length <= 1) { + return; + } + Random rng = new Random(); + int index = rng.nextInt(metastoreUris.length - 1) + 1; + URI tmp = metastoreUris[0]; + metastoreUris[0] = metastoreUris[index]; + metastoreUris[index] = tmp; + } + + @VisibleForTesting + public TTransport getTTransport() { + return transport; + } + + @Override + public boolean isLocalMetaStore() { + return localMetaStore; + } + + @Override + public boolean isCompatibleWith(Configuration conf) { + // Make a copy of currentMetaVars, there is a race condition that + // currentMetaVars might be changed during the execution of the method + Map currentMetaVarsCopy = currentMetaVars; + if (currentMetaVarsCopy == null) { + return false; // recreate + } + boolean compatible = true; + for (ConfVars oneVar : MetastoreConf.metaVars) { + // Since metaVars are all of different types, use string for comparison + String oldVar = currentMetaVarsCopy.get(oneVar.getVarname()); + String newVar = MetastoreConf.getAsString(conf, oneVar); + if (oldVar == null || + (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) { + LOG.info("Mestastore configuration " + oneVar.toString() + + " changed from " + oldVar + " to " + newVar); + compatible = false; + } + } + return compatible; + } + + @Override + public void setHiveAddedJars(String addedJars) { + MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars); + } + + @Override + public void reconnect() throws MetaException { + if (localMetaStore) { + // For direct DB connections we don't yet support reestablishing connections. 
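The RANDOM URI-selection and reconnect logic above leans on one small invariant: after a failure, slot 0 of metastoreUris should point somewhere other than the URI that just died. A self-contained restatement of that promotion step (editorial sketch; the hms1/hms2 hosts are made up):

import java.net.URI;
import java.util.Random;

public class UriPromotion {
  // Mirrors promoteRandomMetaStoreURI(): swap uris[0] with a random
  // element from the remainder so the next open() tries a different server.
  static void promoteRandom(URI[] uris) {
    if (uris.length <= 1) {
      return; // nothing to fail over to
    }
    Random rng = new Random();
    int index = rng.nextInt(uris.length - 1) + 1; // any slot except 0
    URI tmp = uris[0];
    uris[0] = uris[index];
    uris[index] = tmp;
  }

  public static void main(String[] args) {
    URI[] uris = { URI.create("thrift://hms1:9083"), URI.create("thrift://hms2:9083") };
    promoteRandom(uris);
    System.out.println("next attempt goes to " + uris[0]);
  }
}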
+ throw new MetaException("For direct MetaStore DB connections, we don't support retries" + + " at the client level."); + } else { + close(); + + if (uriResolverHook != null) { + //for dynamic uris, re-lookup if there are new metastore locations + resolveUris(); + } + + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) { + // Swap the first element of the metastoreUris[] with a random element from the rest + // of the array. Rationale being that this method will generally be called when the default + // connection has died and the default connection is likely to be the first array element. + promoteRandomMetaStoreURI(); + } + open(); + } + } + + /** + * @param dbname + * @param tbl_name + * @param new_tbl + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see + * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table( + * java.lang.String, java.lang.String, + * org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void alter_table(String dbname, String tbl_name, Table new_tbl) + throws InvalidOperationException, MetaException, TException { + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); + } + + @Override + public void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException { + EnvironmentContext environmentContext = new EnvironmentContext(); + if (cascade) { + environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); + } + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + } + + @Override + public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, + EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + } + + /** + * @param dbname + * @param name + * @param part_vals + * @param newPart + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition( + * java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.rename_partition(dbname, name, part_vals, newPart); + } + + private void open() throws MetaException { + isConnected = false; + TTransportException tte = null; + boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL); + boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL); + boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT); + boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL); + int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); + + for (int attempt = 0; !isConnected && attempt < retries; ++attempt) { + for (URI store : metastoreUris) { + LOG.info("Trying to connect to metastore with URI " + store); + + try { + if (useSSL) { + try { + String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim(); + if (trustStorePath.isEmpty()) { + throw new 
IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString() + + " Not configured for SSL connection"); + } + String trustStorePassword = + MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD); + + // Create an SSL socket and connect + transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, + trustStorePath, trustStorePassword ); + LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet()); + } catch(IOException e) { + throw new IllegalArgumentException(e); + } catch(TTransportException e) { + tte = e; + throw new MetaException(e.toString()); + } + } else { + transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout); + } + + if (useSasl) { + // Wrap thrift connection with SASL for secure connection. + try { + HadoopThriftAuthBridge.Client authBridge = + HadoopThriftAuthBridge.getBridge().createClient(); + + // check if we should use delegation tokens to authenticate + // the call below gets hold of the tokens if they are set up by hadoop + // this should happen on the map/reduce tasks if the client added the + // tokens into hadoop's credential store in the front end during job + // submission. + String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE); + // tokenSig could be null + tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig); + + if(tokenStrForm != null) { + LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection."); + // authenticate using delegation tokens via the "DIGEST" mechanism + transport = authBridge.createClientTransport(null, store.getHost(), + "DIGEST", tokenStrForm, transport, + MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL)); + } else { + LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection."); + String principalConfig = + MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL); + transport = authBridge.createClientTransport( + principalConfig, store.getHost(), "KERBEROS", null, + transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL)); + } + } catch (IOException ioe) { + LOG.error("Couldn't create client transport", ioe); + throw new MetaException(ioe.toString()); + } + } else { + if (useFramedTransport) { + transport = new TFramedTransport(transport); + } + } + + final TProtocol protocol; + if (useCompactProtocol) { + protocol = new TCompactProtocol(transport); + } else { + protocol = new TBinaryProtocol(transport); + } + client = new ThriftHiveMetastore.Client(protocol); + try { + if (!transport.isOpen()) { + transport.open(); + LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet()); + } + isConnected = true; + } catch (TTransportException e) { + tte = e; + if (LOG.isDebugEnabled()) { + LOG.warn("Failed to connect to the MetaStore Server...", e); + } else { + // Don't print full exception trace if DEBUG is not on. + LOG.warn("Failed to connect to the MetaStore Server..."); + } + } + + if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){ + // Call set_ugi, only in unsecure mode. + try { + UserGroupInformation ugi = SecurityUtils.getUGI(); + client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames())); + } catch (LoginException e) { + LOG.warn("Failed to do login. 
set_ugi() was not successful; " + + "continuing without it.", e); + } catch (IOException e) { + LOG.warn("Failed to find ugi of client; set_ugi() was not successful, " + + "continuing without it.", e); + } catch (TException e) { + LOG.warn("set_ugi() not successful; likely cause: new client talking to old server. " + + "Continuing without it.", e); + } + } + } catch (MetaException e) { + LOG.error("Unable to connect to metastore with URI " + store + + " in attempt " + attempt, e); + } + if (isConnected) { + break; + } + } + // Wait before launching the next round of connection retries. + if (!isConnected && retryDelaySeconds > 0) { + try { + LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt."); + Thread.sleep(retryDelaySeconds * 1000); + } catch (InterruptedException ignore) {} + } + } + + if (!isConnected) { + throw new MetaException("Could not connect to meta store using any of the URIs provided." + + " Most recent failure: " + StringUtils.stringifyException(tte)); + } + + snapshotActiveConf(); + + LOG.info("Connected to metastore."); + } + + private void snapshotActiveConf() { + currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length); + for (ConfVars oneVar : MetastoreConf.metaVars) { + currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar)); + } + } + + @Override + public String getTokenStrForm() throws IOException { + return tokenStrForm; + } + + @Override + public void close() { + isConnected = false; + currentMetaVars = null; + try { + if (null != client) { + client.shutdown(); + } + } catch (TException e) { + LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); + } + // Transport would have been closed via client.shutdown(), so we don't need this, but + // just in case, we make this call. + if ((transport != null) && transport.isOpen()) { + transport.close(); + LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet()); + } + } + + @Override + public void setMetaConf(String key, String value) throws TException { + client.setMetaConf(key, value); + } + + @Override + public String getMetaConf(String key) throws TException { + return client.getMetaConf(key); + } + + /** + * @param new_part + * @return the added partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public Partition add_partition(Partition new_part) throws TException { + return add_partition(new_part, null); + } + + public Partition add_partition(Partition new_part, EnvironmentContext envContext) + throws TException { + Partition p = client.add_partition_with_environment_context(new_part, envContext); + return fastpath ? p : deepCopy(p); + } + + /** + * @param new_parts + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) + */ + @Override + public int add_partitions(List<Partition> new_parts) throws TException { + return client.add_partitions(new_parts); + } + + @Override + public List<Partition> add_partitions( + List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException { + if (parts.isEmpty()) { + return needResults ?
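/*
 * Illustrative sketch (not part of this patch): open() walks every configured URI on each
 * attempt, so HA deployments list several metastores; with RANDOM selection, reconnect()
 * shuffles which URI is tried first. THRIFT_URI_SELECTION appears above; THRIFT_URIS is
 * assumed here, and the values are examples.
 *
 *   Configuration conf = MetastoreConf.newMetastoreConf();
 *   MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS,
 *       "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083");
 *   MetastoreConf.setVar(conf, ConfVars.THRIFT_URI_SELECTION, "RANDOM");
 */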
new ArrayList<>() : null; + } + Partition part = parts.get(0); + AddPartitionsRequest req = new AddPartitionsRequest( + part.getDbName(), part.getTableName(), parts, ifNotExists); + req.setNeedResult(needResults); + AddPartitionsResult result = client.add_partitions_req(req); + return needResults ? filterHook.filterPartitions(result.getPartitions()) : null; + } + + @Override + public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); + } + + /** + * @param table_name + * @param db_name + * @param part_vals + * @return the appended partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition appendPartition(String db_name, String table_name, + List part_vals) throws TException { + return appendPartition(db_name, table_name, part_vals, null); + } + + public Partition appendPartition(String db_name, String table_name, List part_vals, + EnvironmentContext envContext) throws TException { + Partition p = client.append_partition_with_environment_context(db_name, table_name, + part_vals, envContext); + return fastpath ? p : deepCopy(p); + } + + @Override + public Partition appendPartition(String dbName, String tableName, String partName) + throws TException { + return appendPartition(dbName, tableName, partName, (EnvironmentContext)null); + } + + public Partition appendPartition(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? 
p : deepCopy(p); + } + + /** + * Exchange the specified partition between two tables + * @param partitionSpecs partition specs (partition key to value) identifying the partition to be exchanged + * @param sourceDb the db of the source table + * @param sourceTable the source table name + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + * @return the new partition after exchanging + */ + @Override + public Partition exchange_partition(Map<String, String> partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + /** + * Exchange the specified partitions between two tables + * @param partitionSpecs partition specs (partition key to value) identifying the partitions to be exchanged + * @param sourceDb the db of the source table + * @param sourceTable the source table name + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + * @return the new partitions after exchanging + */ + @Override + public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + @Override + public void validatePartitionNameCharacters(List<String> partVals) + throws TException, MetaException { + client.partition_name_has_valid_characters(partVals, true); + } + + /** + * Create a new Database + * @param db + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database) + */ + @Override + public void createDatabase(Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + client.create_database(db); + } + + /** + * @param tbl + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void createTable(Table tbl) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + createTable(tbl, null); + } + + public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preCreateTable(tbl); + } + boolean success = false; + try { + // Subclasses can override this step (for example, for temporary tables) + create_table_with_environment_context(tbl, envContext); + if (hook != null) { + hook.commitCreateTable(tbl); + } + success = true; + } + finally { + if (!success && (hook != null)) { + try { + hook.rollbackCreateTable(tbl); + } catch (Exception e) { + LOG.error("Create rollback failed with", e); + } + } + } + } + + @Override + public void createTableWithConstraints(Table tbl, + List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, + List<SQLUniqueConstraint> uniqueConstraints, + List<SQLNotNullConstraint> notNullConstraints, + List<SQLDefaultConstraint> defaultConstraints, + List<SQLCheckConstraint> checkConstraints) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preCreateTable(tbl); + } + boolean success = false; + try { + // Subclasses can override this step (for example, for
temporary tables) + client.create_table_with_constraints(tbl, primaryKeys, foreignKeys, + uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + if (hook != null) { + hook.commitCreateTable(tbl); + } + success = true; + } finally { + if (!success && (hook != null)) { + hook.rollbackCreateTable(tbl); + } + } + } + + @Override + public void dropConstraint(String dbName, String tableName, String constraintName) throws + NoSuchObjectException, MetaException, TException { + client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName)); + } + + @Override + public void addPrimaryKey(List primaryKeyCols) throws + NoSuchObjectException, MetaException, TException { + client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols)); + } + + @Override + public void addForeignKey(List foreignKeyCols) throws + NoSuchObjectException, MetaException, TException { + client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols)); + } + + @Override + public void addUniqueConstraint(List uniqueConstraintCols) throws + NoSuchObjectException, MetaException, TException { + client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols)); + } + + @Override + public void addNotNullConstraint(List notNullConstraintCols) throws + NoSuchObjectException, MetaException, TException { + client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols)); + } + + @Override + public void addDefaultConstraint(List defaultConstraints) throws + NoSuchObjectException, MetaException, TException { + client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints)); + } + + @Override + public void addCheckConstraint(List checkConstraints) throws MetaException, + NoSuchObjectException, TException { + client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints)); + } + + /** + * @param type + * @return true or false + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type) + */ + public boolean createType(Type type) throws AlreadyExistsException, + InvalidObjectException, MetaException, TException { + return client.create_type(type); + } + + /** + * @param name + * @throws NoSuchObjectException + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean) + */ + @Override + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, true, false, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, deleteData, ignoreUnknownDb, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getDatabase(name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownDb) { + throw e; + } + return; + } + + if (cascade) { + List tableList = getAllTables(name); + for (String table : tableList) { + try { + // Subclasses can override this step (for example, for temporary tables) + 
dropTable(name, table, deleteData, true); + } catch (UnsupportedOperationException e) { + // Ignore Index tables, those will be dropped with parent tables + } + } + } + client.drop_database(name, deleteData, cascade); + } + + /** + * @param tbl_name + * @param db_name + * @param part_vals + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + public boolean dropPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, + TException { + return dropPartition(db_name, tbl_name, part_vals, true, null); + } + + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, true, env_context); + } + + @Override + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) + throws NoSuchObjectException, MetaException, TException { + return dropPartition(dbName, tableName, partName, deleteData, null); + } + + private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { + Map warehouseOptions = new HashMap<>(); + warehouseOptions.put("ifPurge", "TRUE"); + return new EnvironmentContext(warehouseOptions); + } + + /* + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge) + throws NoSuchObjectException, MetaException, TException { + + return dropPartition(dbName, tableName, partName, deleteData, + ifPurge? getEnvironmentContextWithIfPurgeSet() : null); + } + */ + + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, + EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + /** + * @param db_name + * @param tbl_name + * @param part_vals + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + @Override + public boolean dropPartition(String db_name, String tbl_name, + List part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, deleteData, null); + } + + @Override + public boolean dropPartition(String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) throws TException { + return dropPartition(db_name, tbl_name, part_vals, options.deleteData, + options.purgeData? 
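/*
 * Illustrative sketch (not part of this patch): PartitionDropOptions is a small builder
 * whose flags the overloads here map onto deleteData/ifExists/returnResults and, via
 * purgeData, onto the "ifPurge" EnvironmentContext built by
 * getEnvironmentContextWithIfPurgeSet().
 *
 *   PartitionDropOptions opts = PartitionDropOptions.instance()
 *       .deleteData(true)
 *       .ifExists(true)
 *       .returnResults(false);
 */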
getEnvironmentContextWithIfPurgeSet() : null); + } + + public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, + envContext); + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options) + throws TException { + RequestPartsSpec rps = new RequestPartsSpec(); + List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size()); + for (ObjectPair<Integer, byte[]> partExpr : partExprs) { + DropPartitionsExpr dpe = new DropPartitionsExpr(); + dpe.setExpr(partExpr.getSecond()); + dpe.setPartArchiveLevel(partExpr.getFirst()); + exprs.add(dpe); + } + rps.setExprs(exprs); + DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); + req.setDeleteData(options.deleteData); + req.setNeedResult(options.returnResults); + req.setIfExists(options.ifExists); + if (options.purgeData) { + LOG.info("Dropped partitions will be purged!"); + req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); + } + return client.drop_partitions_req(req).getPartitions(); + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, + boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { + + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists) + .returnResults(needResult)); + + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, + boolean ifExists) throws NoSuchObjectException, MetaException, TException { + // By default, we need the results from dropPartitions(); + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists)); + } + + /** + * {@inheritDoc} + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + } + + /** + * Drop the table and choose whether to save the data in the trash.
+ * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) + throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + // build a new EnvironmentContext with ifPurge set + EnvironmentContext envContext = null; + if (ifPurge) { + Map<String, String> warehouseOptions = new HashMap<>(); + warehouseOptions.put("ifPurge", "TRUE"); + envContext = new EnvironmentContext(warehouseOptions); + } + dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); + } + + /** + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name) + throws NoSuchObjectException, MetaException, TException { + dropTable(dbname, name, true, true, null); + } + + /** + * Drop the table and choose whether to: delete the underlying table data; + * throw if the table doesn't exist; save the data in the trash. + * + * @param dbname + * @param name + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist + * @param envContext + * for communicating with thrift + * @throws MetaException + * could not drop table properly + * @throws NoSuchObjectException + * the table wasn't found + * @throws TException + * a thrift communication error occurred + * @throws UnsupportedOperationException + * dropping an index table is not allowed + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, + * java.lang.String, boolean) + */ + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + Table tbl; + try { + tbl = getTable(dbname, name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + return; + } + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preDropTable(tbl); + } + boolean success = false; + try { + drop_table_with_environment_context(dbname, name, deleteData, envContext); + if (hook != null) { + hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge")))); + } + success = true; + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + } finally { + if (!success && (hook != null)) { + hook.rollbackDropTable(tbl); + } + } + } + + /** + * Truncate the table/partitions in the given database. + * @param dbName + * The db to which the table to be truncated belongs + * @param tableName + * The table to truncate + * @param partNames + * List of partitions to truncate. NULL will truncate the whole table/all partitions + * @throws MetaException + * @throws TException + * Could not truncate table properly. + */ + @Override + public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException { + client.truncate_table(dbName, tableName, partNames); + } + + /** + * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it.
+ * + * @param request Inputs for path of the data files to be recycled to cmroot and + * isPurge flag when set to true files which needs to be recycled are not moved to Trash + * @return Response which is currently void + */ + @Override + public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException { + return client.cm_recycle(request); + } + + /** + * @param type + * @return true if the type is dropped + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String) + */ + public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException { + return client.drop_type(type); + } + + /** + * @param name + * @return map of types + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String) + */ + public Map getTypeAll(String name) throws MetaException, + TException { + Map result = null; + Map fromClient = client.get_type_all(name); + if (fromClient != null) { + result = new LinkedHashMap<>(); + for (String key : fromClient.keySet()) { + result.put(key, deepCopy(fromClient.get(key))); + } + } + return result; + } + + /** {@inheritDoc} */ + @Override + public List getDatabases(String databasePattern) + throws MetaException { + try { + return filterHook.filterDatabases(client.get_databases(databasePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List getAllDatabases() throws MetaException { + try { + return filterHook.filterDatabases(client.get_all_databases()); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** + * @param tbl_name + * @param db_name + * @param max_parts + * @return list of partitions + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + @Override + public List listPartitions(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions(db_name, tbl_name, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_partitions_pspec(dbName, tableName, maxParts))); + } + + @Override + public List listPartitions(String db_name, String tbl_name, + List part_vals, short max_parts) + throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List listPartitionsWithAuthInfo(String db_name, + String tbl_name, short max_parts, String user_name, List group_names) + throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + user_name, group_names); + return fastpath ? 
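/*
 * Note on the recurring "fastpath ? x : deepCopy(x)" pattern in these read paths: unless the
 * client runs embedded in the server (fastpath), results are filtered by the filter hook and
 * then defensively copied, so callers cannot mutate objects shared with the Thrift layer.
 * Sketch (msc: an assumed IMetaStoreClient handle):
 *
 *   List<Partition> parts = msc.listPartitions("db", "tbl", (short) -1);
 *   parts.get(0).putToParameters("comment", "scratch");  // mutates the copy only
 */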
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String db_name, + String tbl_name, List<String> part_vals, short max_parts, + String user_name, List<String> group_names) throws NoSuchObjectException, + MetaException, TException { + List<Partition> parts = client.get_partitions_ps_with_auth(db_name, + tbl_name, part_vals, max_parts, user_name, group_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + /** + * Get list of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can + * be done only on string partition keys. + * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public List<Partition> listPartitionsByFilter(String db_name, String tbl_name, + String filter, short max_parts) throws MetaException, + NoSuchObjectException, TException { + List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + } + + @Override + public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, + String default_partition_name, short max_parts, List<Partition> result) + throws TException { + assert result != null; + PartitionsByExprRequest req = new PartitionsByExprRequest( + db_name, tbl_name, ByteBuffer.wrap(expr)); + if (default_partition_name != null) { + req.setDefaultPartitionName(default_partition_name); + } + if (max_parts >= 0) { + req.setMaxParts(max_parts); + } + PartitionsByExprResult r; + try { + r = client.get_partitions_by_expr(req); + } catch (TApplicationException te) { + // TODO: backward compat for Hive <= 0.12. Can be removed later. + if (te.getType() != TApplicationException.UNKNOWN_METHOD + && te.getType() != TApplicationException.WRONG_METHOD_NAME) { + throw te; + } + throw new IncompatibleMetastoreException( + "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); + } + if (fastpath) { + result.addAll(r.getPartitions()); + } else { + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); + } + return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. + } + + /** + * @param name + * @return the database + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String) + */ + @Override + public Database getDatabase(String name) throws NoSuchObjectException, + MetaException, TException { + Database d = client.get_database(name); + return fastpath ?
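/*
 * Illustrative sketch (not part of this patch): the filter grammar documented above compares
 * string partition keys; e.g. fetching at most 100 matching partitions (msc and the names
 * are example values):
 *
 *   List<Partition> parts = msc.listPartitionsByFilter(
 *       "sales", "orders", "ds = \"2018-03-26\" and region = \"us\"", (short) 100);
 */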
d :deepCopy(filterHook.filterDatabase(d)); + } + + /** + * @param tbl_name + * @param db_name + * @param part_vals + * @return the partition + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition getPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, TException { + Partition p = client.get_partition(db_name, tbl_name, part_vals); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + @Override + public List getPartitionsByNames(String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) + throws MetaException, TException, NoSuchObjectException { + return client.get_partition_values(request); + } + + @Override + public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, + List part_vals, String user_name, List group_names) + throws MetaException, UnknownTableException, NoSuchObjectException, + TException { + Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, + group_names); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + /** + * @param name + * @param dbname + * @return the table + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, + * java.lang.String) + */ + @Override + public Table getTable(String dbname, String name) throws MetaException, + TException, NoSuchObjectException { + GetTableRequest req = new GetTableRequest(dbname, name); + req.setCapabilities(version); + Table t = client.get_table_req(req).getTable(); + return fastpath ? t : deepCopy(filterHook.filterTable(t)); + } + + /** {@inheritDoc} */ + @Override + public List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + GetTablesRequest req = new GetTablesRequest(dbName); + req.setTblNames(tableNames); + req.setCapabilities(version); + List<Table>
tabs = client.get_table_objects_by_name_req(req).getTables(); + return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); + } + + /** {@inheritDoc} */ + @Override + public Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + return client.get_materialization_invalidation_info( + dbName, filterHook.filterTableNames(null, dbName, viewNames)); + } + + /** {@inheritDoc} */ + @Override + public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + client.update_creation_metadata(null, dbName, tableName, cm); + } + + /** {@inheritDoc} */ + @Override + public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables) + throws MetaException, TException, InvalidOperationException, UnknownDBException { + return filterHook.filterTableNames(null, dbName, + client.get_table_names_by_filter(dbName, filter, maxTables)); + } + + /** + * @param name + * @return the type + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String) + */ + public Type getType(String name) throws NoSuchObjectException, MetaException, TException { + return deepCopy(client.get_type(name)); + } + + /** {@inheritDoc} */ + @Override + public List<String> getTables(String dbname, String tablePattern) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_tables(dbname, tablePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, + client.get_tables_by_type(dbname, tablePattern, tableType.toString())); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List<String> getMaterializedViewsForRewriting(String dbname) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_materialized_views_for_rewriting(dbname)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes) + throws MetaException { + try { + return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException { + Map<String, TableMeta> sources = new LinkedHashMap<>(); + Map<String, List<String>> dbTables = new LinkedHashMap<>(); + for (TableMeta meta : metas) { + sources.put(meta.getDbName() + "." + meta.getTableName(), meta); + List<String> tables = dbTables.get(meta.getDbName()); + if (tables == null) { + dbTables.put(meta.getDbName(), tables = new ArrayList<>()); + } + tables.add(meta.getTableName()); + } + List<TableMeta> filtered = new ArrayList<>(); + for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) { + for (String table : filterHook.filterTableNames(null, entry.getKey(), entry.getValue())) { + filtered.add(sources.get(entry.getKey() + "."
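/*
 * Illustrative sketch (not part of this patch): getTableMeta() takes db/table name patterns
 * plus optional table-type names, and filterNames() re-applies the filter hook per database;
 * the patterns and type below are example values.
 *
 *   List<TableMeta> metas = msc.getTableMeta("web_*", "*",
 *       Arrays.asList(TableType.MANAGED_TABLE.toString()));
 */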
+ table)); + } + } + return filtered; + } + + /** {@inheritDoc} */ + @Override + public List<String> getAllTables(String dbname) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_all_tables(dbname)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public boolean tableExists(String databaseName, String tableName) throws MetaException, + TException, UnknownDBException { + try { + GetTableRequest req = new GetTableRequest(databaseName, tableName); + req.setCapabilities(version); + return filterHook.filterTable(client.get_table_req(req).getTable()) != null; + } catch (NoSuchObjectException e) { + return false; + } + } + + @Override + public List<String> listPartitionNames(String dbName, String tblName, + short max) throws NoSuchObjectException, MetaException, TException { + return filterHook.filterPartitionNames(null, dbName, tblName, + client.get_partition_names(dbName, tblName, max)); + } + + @Override + public List<String> listPartitionNames(String db_name, String tbl_name, + List<String> part_vals, short max_parts) + throws MetaException, TException, NoSuchObjectException { + return filterHook.filterPartitionNames(null, db_name, tbl_name, + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + } + + /** + * Get number of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can + * be done only on string partition keys. + * @return number of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public int getNumPartitionsByFilter(String db_name, String tbl_name, + String filter) throws MetaException, + NoSuchObjectException, TException { + return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + } + + @Override + public void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + } + + @Override + public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); + } + + @Override + public void alter_partitions(String dbName, String tblName, List<Partition> newParts) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + } + + @Override + public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); + } + + @Override + public void alterDatabase(String dbName, Database db) + throws MetaException, NoSuchObjectException, TException { + client.alter_database(dbName, db); + } + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, + * java.lang.String) + */ + @Override + public List<FieldSchema> getFields(String db, String
tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + List<FieldSchema> fields = client.get_fields(db, tableName); + return fastpath ? fields : deepCopyFieldSchemas(fields); + } + + @Override + public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_primary_keys(req).getPrimaryKeys(); + } + + @Override + public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException, + NoSuchObjectException, TException { + return client.get_foreign_keys(req).getForeignKeys(); + } + + @Override + public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_unique_constraints(req).getUniqueConstraints(); + } + + @Override + public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_not_null_constraints(req).getNotNullConstraints(); + } + + @Override + public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_default_constraints(req).getDefaultConstraints(); + } + + @Override + public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest request) throws + MetaException, NoSuchObjectException, TException { + return client.get_check_constraints(request).getCheckConstraints(); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + // use setPartitionColumnStatistics instead + public boolean updateTableColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException { + return client.update_table_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + // use setPartitionColumnStatistics instead + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException { + return client.update_partition_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException { + return client.set_aggr_stats_for(request); + } + + @Override + public void flushCache() { + try { + client.flushCache(); + } catch (TException e) { + // Not much we can do about it honestly + LOG.warn("Got error flushing the cache", e); + } + } + + /** {@inheritDoc} */ + @Override + public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName, + List<String> colNames) throws NoSuchObjectException, MetaException, TException, + InvalidInputException, InvalidObjectException { + return client.get_table_statistics_req( + new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + } + + /** {@inheritDoc} */ + @Override + public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics( + String dbName, String tableName, List<String> partNames, List<String> colNames) + throws NoSuchObjectException, MetaException, TException { + return client.get_partitions_statistics_req( + new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + } + + /** {@inheritDoc} */ + @Override + public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, + TException, InvalidInputException { +
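/*
 * Illustrative sketch (not part of this patch): reading statistics back mirrors the request
 * objects used above, e.g. per-column table statistics (db, table, and columns are example
 * values):
 *
 *   List<ColumnStatisticsObj> stats = msc.getTableColumnStatistics(
 *       "sales", "orders", Arrays.asList("order_id", "amount"));
 */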
return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + } + + /** {@inheritDoc} */ + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException + { + return client.delete_table_column_statistics(dbName, tableName, colName); + } + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, + * java.lang.String) + */ + @Override + public List getSchema(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + EnvironmentContext envCxt = null; + String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); + if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + Map props = new HashMap(); + props.put("hive.added.jars.path", addedJars); + envCxt = new EnvironmentContext(props); + } + + List fields = client.get_schema_with_environment_context(db, tableName, envCxt); + return fastpath ? fields : deepCopyFieldSchemas(fields); + } + + @Override + public String getConfigValue(String name, String defaultValue) + throws TException, ConfigValSecurityException { + return client.get_config_value(name, defaultValue); + } + + @Override + public Partition getPartition(String db, String tableName, String partName) + throws MetaException, TException, UnknownTableException, NoSuchObjectException { + Partition p = client.get_partition_by_name(db, tableName, partName); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + return appendPartitionByName(dbName, tableName, partName, null); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + MetaException, TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? 
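/*
 * Illustrative sketch (not part of this patch): the name-based variants above take the
 * canonical partition name rather than a list of values:
 *
 *   Partition p = msc.getPartition("sales", "orders", "ds=2018-03-26/region=us");
 */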
p : deepCopy(p); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData) throws NoSuchObjectException, MetaException, TException { + return dropPartitionByName(dbName, tableName, partName, deleteData, null); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + private HiveMetaHook getHook(Table tbl) throws MetaException { + if (hookLoader == null) { + return null; + } + return hookLoader.getHook(tbl); + } + + @Override + public List partitionNameToVals(String name) throws MetaException, TException { + return client.partition_name_to_vals(name); + } + + @Override + public Map partitionNameToSpec(String name) throws MetaException, TException{ + return client.partition_name_to_spec(name); + } + + /** + * @param partition + * @return + */ + private Partition deepCopy(Partition partition) { + Partition copy = null; + if (partition != null) { + copy = new Partition(partition); + } + return copy; + } + + private Database deepCopy(Database database) { + Database copy = null; + if (database != null) { + copy = new Database(database); + } + return copy; + } + + protected Table deepCopy(Table table) { + Table copy = null; + if (table != null) { + copy = new Table(table); + } + return copy; + } + + private Type deepCopy(Type type) { + Type copy = null; + if (type != null) { + copy = new Type(type); + } + return copy; + } + + private FieldSchema deepCopy(FieldSchema schema) { + FieldSchema copy = null; + if (schema != null) { + copy = new FieldSchema(schema); + } + return copy; + } + + private Function deepCopy(Function func) { + Function copy = null; + if (func != null) { + copy = new Function(func); + } + return copy; + } + + protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) { + PrincipalPrivilegeSet copy = null; + if (pps != null) { + copy = new PrincipalPrivilegeSet(pps); + } + return copy; + } + + private List deepCopyPartitions(List partitions) { + return deepCopyPartitions(partitions, null); + } + + private List deepCopyPartitions( + Collection src, List dest) { + if (src == null) { + return dest; + } + if (dest == null) { + dest = new ArrayList(src.size()); + } + for (Partition part : src) { + dest.add(deepCopy(part)); + } + return dest; + } + + private List
<Table> deepCopyTables(List<Table> tables) { + List<Table> copy = null; + if (tables != null) { + copy = new ArrayList<>
(); + for (Table tab : tables) { + copy.add(deepCopy(tab)); + } + } + return copy; + } + + protected List deepCopyFieldSchemas(List schemas) { + List copy = null; + if (schemas != null) { + copy = new ArrayList(); + for (FieldSchema schema : schemas) { + copy.add(deepCopy(schema)); + } + } + return copy; + } + + @Override + public boolean grant_role(String roleName, String userName, + PrincipalType principalType, String grantor, PrincipalType grantorType, + boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantor(grantor); + req.setGrantorType(grantorType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean create_role(Role role) + throws MetaException, TException { + return client.create_role(role); + } + + @Override + public boolean drop_role(String roleName) throws MetaException, TException { + return client.drop_role(roleName); + } + + @Override + public List list_roles(String principalName, + PrincipalType principalType) throws MetaException, TException { + return client.list_roles(principalName, principalType); + } + + @Override + public List listRoleNames() throws MetaException, TException { + return client.get_role_names(); + } + + @Override + public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req) + throws MetaException, TException { + return client.get_principals_in_role(req); + } + + @Override + public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException { + return client.get_role_grants_for_principal(getRolePrincReq); + } + + @Override + public boolean grant_privileges(PrivilegeBag privileges) + throws MetaException, TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setPrivileges(privileges); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_role(String roleName, String userName, + PrincipalType principalType, boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.REVOKE); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, + TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); + req.setRequestType(GrantRevokeType.REVOKE); + req.setPrivileges(privileges); + req.setRevokeGrantOption(grantOption); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new 
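/*
 * Illustrative sketch (not part of this patch): the grant/revoke methods here wrap
 * GrantRevokeRoleRequest/GrantRevokePrivilegeRequest; a typical role grant to a user,
 * issued by an admin role (names are example values):
 *
 *   boolean granted = msc.grant_role("analysts", "alice", PrincipalType.USER,
 *       "admin", PrincipalType.ROLE, false);
 */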
MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + String userName, List groupNames) throws MetaException, + TException { + return client.get_privilege_set(hiveObject, userName, groupNames); + } + + @Override + public List list_privileges(String principalName, + PrincipalType principalType, HiveObjectRef hiveObject) + throws MetaException, TException { + return client.list_privileges(principalName, principalType, hiveObject); + } + + public String getDelegationToken(String renewerKerberosPrincipalName) throws + MetaException, TException, IOException { + //a convenience method that makes the intended owner for the delegation + //token request the current user + String owner = SecurityUtils.getUser(); + return getDelegationToken(owner, renewerKerberosPrincipalName); + } + + @Override + public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws + MetaException, TException { + // This is expected to be a no-op, so we will return null when we use local metastore. + if (localMetaStore) { + return null; + } + return client.get_delegation_token(owner, renewerKerberosPrincipalName); + } + + @Override + public long renewDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return 0; + } + return client.renew_delegation_token(tokenStrForm); + + } + + @Override + public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return; + } + client.cancel_delegation_token(tokenStrForm); + } + + @Override + public boolean addToken(String tokenIdentifier, String delegationToken) throws TException { + return client.add_token(tokenIdentifier, delegationToken); + } + + @Override + public boolean removeToken(String tokenIdentifier) throws TException { + return client.remove_token(tokenIdentifier); + } + + @Override + public String getToken(String tokenIdentifier) throws TException { + return client.get_token(tokenIdentifier); + } + + @Override + public List getAllTokenIdentifiers() throws TException { + return client.get_all_token_identifiers(); + } + + @Override + public int addMasterKey(String key) throws MetaException, TException { + return client.add_master_key(key); + } + + @Override + public void updateMasterKey(Integer seqNo, String key) + throws NoSuchObjectException, MetaException, TException { + client.update_master_key(seqNo, key); + } + + @Override + public boolean removeMasterKey(Integer keySeq) throws TException { + return client.remove_master_key(keySeq); + } + + @Override + public String[] getMasterKeys() throws TException { + List keyList = client.get_master_keys(); + return keyList.toArray(new String[keyList.size()]); + } + + @Override + public ValidTxnList getValidTxns() throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0); + } + + @Override + public ValidTxnList getValidTxns(long currentTxn) throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn); + } + + @Override + public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException { + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null); + GetValidWriteIdsResponse validWriteIds = client.get_valid_write_ids(rqst); + return TxnUtils.createValidReaderWriteIdList(validWriteIds.getTblValidWriteIds().get(0)); + } + + @Override + public 
ValidTxnWriteIdList getValidWriteIds(Long currentTxnId, List tablesList, String validTxnList) + throws TException { + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tablesList, validTxnList); + return TxnUtils.createValidTxnWriteIdList(currentTxnId, client.get_valid_write_ids(rqst)); + } + + @Override + public long openTxn(String user) throws TException { + OpenTxnsResponse txns = openTxns(user, 1); + return txns.getTxn_ids().get(0); + } + + @Override + public OpenTxnsResponse openTxns(String user, int numTxns) throws TException { + String hostname = null; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOG.error("Unable to resolve my host name " + e.getMessage()); + throw new RuntimeException(e); + } + return client.open_txns(new OpenTxnRequest(numTxns, user, hostname)); + } + + @Override + public void rollbackTxn(long txnid) throws NoSuchTxnException, TException { + client.abort_txn(new AbortTxnRequest(txnid)); + } + + @Override + public void commitTxn(long txnid) + throws NoSuchTxnException, TxnAbortedException, TException { + client.commit_txn(new CommitTxnRequest(txnid)); + } + + @Override + public GetOpenTxnsInfoResponse showTxns() throws TException { + return client.get_open_txns_info(); + } + + @Override + public void abortTxns(List txnids) throws NoSuchTxnException, TException { + client.abort_txns(new AbortTxnsRequest(txnids)); + } + + @Override + public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { + return allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName).get(0).getWriteId(); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) + throws TException { + AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(txnIds, dbName, tableName); + AllocateTableWriteIdsResponse writeIds = client.allocate_table_write_ids(rqst); + return writeIds.getTxnToWriteIds(); + } + + @Override + public LockResponse lock(LockRequest request) + throws NoSuchTxnException, TxnAbortedException, TException { + return client.lock(request); + } + + @Override + public LockResponse checkLock(long lockid) + throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, + TException { + return client.check_lock(new CheckLockRequest(lockid)); + } + + @Override + public void unlock(long lockid) + throws NoSuchLockException, TxnOpenException, TException { + client.unlock(new UnlockRequest(lockid)); + } + + @Override + @Deprecated + public ShowLocksResponse showLocks() throws TException { + return client.show_locks(new ShowLocksRequest()); + } + + @Override + public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException { + return client.show_locks(request); + } + + @Override + public void heartbeat(long txnid, long lockid) + throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, + TException { + HeartbeatRequest hb = new HeartbeatRequest(); + hb.setLockid(lockid); + hb.setTxnid(txnid); + client.heartbeat(hb); + } + + @Override + public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) + throws NoSuchTxnException, TxnAbortedException, TException { + HeartbeatTxnRangeRequest rqst = new HeartbeatTxnRangeRequest(min, max); + return client.heartbeat_txn_range(rqst); + } + + @Override + @Deprecated + public void compact(String dbname, String tableName, String partitionName, CompactionType type) + throws TException { + CompactionRequest cr = new CompactionRequest(); + if 
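/*
 * Illustrative sketch (not part of this patch): the transaction and lock calls above compose
 * into the usual lifecycle; "lockRequest" is assumed to be built elsewhere:
 *
 *   long txnId = msc.openTxn("etl-user");
 *   LockResponse lr = msc.lock(lockRequest);
 *   msc.heartbeat(txnId, lr.getLockid());   // keeps both txn and lock alive
 *   msc.commitTxn(txnId);                   // or rollbackTxn(txnId) on failure
 */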
(dbname == null) { + cr.setDbname(DEFAULT_DATABASE_NAME); + } else { + cr.setDbname(dbname); + } + cr.setTablename(tableName); + if (partitionName != null) { + cr.setPartitionname(partitionName); + } + cr.setType(type); + client.compact(cr); + } + @Deprecated + @Override + public void compact(String dbname, String tableName, String partitionName, CompactionType type, + Map tblproperties) throws TException { + compact2(dbname, tableName, partitionName, type, tblproperties); + } + + @Override + public CompactionResponse compact2(String dbname, String tableName, String partitionName, CompactionType type, + Map tblproperties) throws TException { + CompactionRequest cr = new CompactionRequest(); + if (dbname == null) { + cr.setDbname(DEFAULT_DATABASE_NAME); + } else { + cr.setDbname(dbname); + } + cr.setTablename(tableName); + if (partitionName != null) { + cr.setPartitionname(partitionName); + } + cr.setType(type); + cr.setProperties(tblproperties); + return client.compact2(cr); + } + @Override + public ShowCompactResponse showCompactions() throws TException { + return client.show_compact(new ShowCompactRequest()); + } + + @Deprecated + @Override + public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, + List partNames) throws TException { + client.add_dynamic_partitions(new AddDynamicPartitions(txnId, writeId, dbName, tableName, partNames)); + } + @Override + public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, + List partNames, DataOperationType operationType) throws TException { + AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, partNames); + adp.setOperationType(operationType); + client.add_dynamic_partitions(adp); + } + + @Override + public void insertTable(Table table, boolean overwrite) throws MetaException { + boolean failed = true; + HiveMetaHook hook = getHook(table); + if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { + return; + } + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + try { + hiveMetaHook.commitInsertTable(table, overwrite); + failed = false; + } + finally { + if (failed) { + hiveMetaHook.rollbackInsertTable(table, overwrite); + } + } + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, + NotificationFilter filter) throws TException { + NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); + rqst.setMaxEvents(maxEvents); + NotificationEventResponse rsp = client.get_next_notification(rqst); + LOG.debug("Got back " + rsp.getEventsSize() + " events"); + if (filter == null) { + return rsp; + } else { + NotificationEventResponse filtered = new NotificationEventResponse(); + if (rsp != null && rsp.getEvents() != null) { + for (NotificationEvent e : rsp.getEvents()) { + if (filter.accept(e)) { + filtered.addToEvents(e); + } + } + } + return filtered; + } + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() throws TException { + return client.get_current_notificationEventId(); + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) + throws TException { + return client.get_notification_events_count(rqst); + } + + @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) + @Override + public FireEventResponse 
fireListenerEvent(FireEventRequest rqst) throws TException { + return client.fire_listener_event(rqst); + } + + /** + * Creates a synchronized wrapper for any {@link IMetaStoreClient}. + * This may be used by multi-threaded applications until we have + * fixed all reentrancy bugs. + * + * @param client unsynchronized client + * + * @return synchronized client + */ + public static IMetaStoreClient newSynchronizedClient( + IMetaStoreClient client) { + return (IMetaStoreClient) Proxy.newProxyInstance( + HiveMetaStoreClientPreCatalog.class.getClassLoader(), + new Class [] { IMetaStoreClient.class }, + new SynchronizedHandler(client)); + } + + private static class SynchronizedHandler implements InvocationHandler { + private final IMetaStoreClient client; + + SynchronizedHandler(IMetaStoreClient client) { + this.client = client; + } + + @Override + public synchronized Object invoke(Object proxy, Method method, Object [] args) + throws Throwable { + try { + return method.invoke(client, args); + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } + } + } + + @Override + public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, TException, NoSuchObjectException, UnknownDBException, + UnknownTableException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + client.markPartitionForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, + MetaException, TException { + client.create_function(func); + } + + @Override + public void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException { + client.alter_function(dbName, funcName, newFunction); + } + + @Override + public void dropFunction(String dbName, String funcName) + throws MetaException, NoSuchObjectException, InvalidObjectException, + InvalidInputException, TException { + client.drop_function(dbName, funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) + throws MetaException, TException { + Function f = client.get_function(dbName, funcName); + return fastpath ? 
f : deepCopy(f); + } + + @Override + public List getFunctions(String dbName, String pattern) + throws MetaException, TException { + return client.get_functions(dbName, pattern); + } + + @Override + public GetAllFunctionsResponse getAllFunctions() + throws MetaException, TException { + return client.get_all_functions(); + } + + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + client.create_table_with_environment_context(tbl, envContext); + } + + protected void drop_table_with_environment_context(String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + } + + @Override + public AggrStats getAggrColStatsFor(String dbName, String tblName, + List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { + if (colNames.isEmpty() || partNames.isEmpty()) { + LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate + } + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + return client.get_aggr_stats_for(req); + } + + @Override + public Iterable> getFileMetadata( + final List fileIds) throws TException { + return new MetastoreMapIterable() { + private int listIndex = 0; + @Override + protected Map fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) { + return null; + } + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataResult resp = sendGetFileMetadataReq(subList); + // TODO: we could remember if it's unsupported and stop sending calls; although, it might + // be a bad idea for HS2+standalone metastore that could be updated with support. + // Maybe we should just remember this for some time. 
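+ // A false isSupported flag means this metastore cannot serve file metadata, so stop fetching.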
+ if (!resp.isIsSupported()) { + return null; + } + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataResult sendGetFileMetadataReq(List fileIds) throws TException { + return client.get_file_metadata(new GetFileMetadataRequest(fileIds)); + } + + @Override + public Iterable> getFileMetadataBySarg( + final List fileIds, final ByteBuffer sarg, final boolean doGetFooters) + throws TException { + return new MetastoreMapIterable() { + private int listIndex = 0; + @Override + protected Map fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) { + return null; + } + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataByExprResult resp = sendGetFileMetadataBySargReq( + sarg, subList, doGetFooters); + if (!resp.isIsSupported()) { + return null; + } + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataByExprResult sendGetFileMetadataBySargReq( + ByteBuffer sarg, List fileIds, boolean doGetFooters) throws TException { + GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(fileIds, sarg); + req.setDoGetFooters(doGetFooters); // Forward the caller's choice of whether to fetch file footers + return client.get_file_metadata_by_expr(req); + } + + public static abstract class MetastoreMapIterable + implements Iterable>, Iterator> { + private Iterator> currentIter; + + protected abstract Map fetchNextBatch() throws TException; + + @Override + public Iterator> iterator() { + return this; + } + + @Override + public boolean hasNext() { + ensureCurrentBatch(); + return currentIter != null; + } + + private void ensureCurrentBatch() { + if (currentIter != null && currentIter.hasNext()) { + return; + } + currentIter = null; + Map currentBatch; + do { + try { + currentBatch = fetchNextBatch(); + } catch (TException ex) { + throw new RuntimeException(ex); + } + if (currentBatch == null) { + return; // No more data. 
+ } + } while (currentBatch.isEmpty()); + currentIter = currentBatch.entrySet().iterator(); + } + + @Override + public Entry next() { + ensureCurrentBatch(); + if (currentIter == null) { + throw new NoSuchElementException(); + } + return currentIter.next(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + @Override + public void clearFileMetadata(List fileIds) throws TException { + ClearFileMetadataRequest req = new ClearFileMetadataRequest(); + req.setFileIds(fileIds); + client.clear_file_metadata(req); + } + + @Override + public void putFileMetadata(List fileIds, List metadata) throws TException { + PutFileMetadataRequest req = new PutFileMetadataRequest(); + req.setFileIds(fileIds); + req.setMetadata(metadata); + client.put_file_metadata(req); + } + + @Override + public boolean isSameConfObj(Configuration c) { + return conf == c; + } + + @Override + public boolean cacheFileMetadata( + String dbName, String tableName, String partName, boolean allParts) throws TException { + CacheFileMetadataRequest req = new CacheFileMetadataRequest(); + req.setDbName(dbName); + req.setTblName(tableName); + if (partName != null) { + req.setPartName(partName); + } else { + req.setIsAllParts(allParts); + } + CacheFileMetadataResult result = client.cache_file_metadata(req); + return result.isIsSupported(); + } + + @Override + public String getMetastoreDbUuid() throws TException { + return client.get_metastore_db_uuid(); + } + + @Override + public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) + throws InvalidObjectException, MetaException, TException { + WMCreateResourcePlanRequest request = new WMCreateResourcePlanRequest(); + request.setResourcePlan(resourcePlan); + request.setCopyFrom(copyFromName); + client.create_resource_plan(request); + } + + @Override + public WMFullResourcePlan getResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + WMGetResourcePlanRequest request = new WMGetResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + return client.get_resource_plan(request).getResourcePlan(); + } + + @Override + public List getAllResourcePlans() + throws NoSuchObjectException, MetaException, TException { + WMGetAllResourcePlanRequest request = new WMGetAllResourcePlanRequest(); + return client.get_all_resource_plans(request).getResourcePlans(); + } + + @Override + public void dropResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + WMDropResourcePlanRequest request = new WMDropResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + client.drop_resource_plan(request); + } + + @Override + public WMFullResourcePlan alterResourcePlan(String resourcePlanName, WMNullableResourcePlan resourcePlan, + boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterResourcePlanRequest request = new WMAlterResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + request.setResourcePlan(resourcePlan); + request.setIsEnableAndActivate(canActivateDisabled); + request.setIsForceDeactivate(isForceDeactivate); + request.setIsReplace(isReplace); + WMAlterResourcePlanResponse resp = client.alter_resource_plan(request); + return resp.isSetFullResourcePlan() ? 
resp.getFullResourcePlan() : null; + } + + @Override + public WMFullResourcePlan getActiveResourcePlan() throws MetaException, TException { + return client.get_active_resource_plan(new WMGetActiveResourcePlanRequest()).getResourcePlan(); + } + + @Override + public WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMValidateResourcePlanRequest request = new WMValidateResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + return client.validate_resource_plan(request); + } + + @Override + public void createWMTrigger(WMTrigger trigger) + throws InvalidObjectException, MetaException, TException { + WMCreateTriggerRequest request = new WMCreateTriggerRequest(); + request.setTrigger(trigger); + client.create_wm_trigger(request); + } + + @Override + public void alterWMTrigger(WMTrigger trigger) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterTriggerRequest request = new WMAlterTriggerRequest(); + request.setTrigger(trigger); + client.alter_wm_trigger(request); + } + + @Override + public void dropWMTrigger(String resourcePlanName, String triggerName) + throws NoSuchObjectException, MetaException, TException { + WMDropTriggerRequest request = new WMDropTriggerRequest(); + request.setResourcePlanName(resourcePlanName); + request.setTriggerName(triggerName); + client.drop_wm_trigger(request); + } + + @Override + public List getTriggersForResourcePlan(String resourcePlan) + throws NoSuchObjectException, MetaException, TException { + WMGetTriggersForResourePlanRequest request = new WMGetTriggersForResourePlanRequest(); + request.setResourcePlanName(resourcePlan); + return client.get_triggers_for_resourceplan(request).getTriggers(); + } + + @Override + public void createWMPool(WMPool pool) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreatePoolRequest request = new WMCreatePoolRequest(); + request.setPool(pool); + client.create_wm_pool(request); + } + + @Override + public void alterWMPool(WMNullablePool pool, String poolPath) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterPoolRequest request = new WMAlterPoolRequest(); + request.setPool(pool); + request.setPoolPath(poolPath); + client.alter_wm_pool(request); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, MetaException, TException { + WMDropPoolRequest request = new WMDropPoolRequest(); + request.setResourcePlanName(resourcePlanName); + request.setPoolPath(poolPath); + client.drop_wm_pool(request); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreateOrUpdateMappingRequest request = new WMCreateOrUpdateMappingRequest(); + request.setMapping(mapping); + request.setUpdate(isUpdate); + client.create_or_update_wm_mapping(request); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, MetaException, TException { + WMDropMappingRequest request = new WMDropMappingRequest(); + request.setMapping(mapping); + client.drop_wm_mapping(request); + } + + @Override + public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, + 
InvalidObjectException, MetaException, TException { + WMCreateOrDropTriggerToPoolMappingRequest request = new WMCreateOrDropTriggerToPoolMappingRequest(); + request.setResourcePlanName(resourcePlanName); + request.setTriggerName(triggerName); + request.setPoolPath(poolPath); + request.setDrop(shouldDrop); + client.create_or_drop_wm_trigger_to_pool_mapping(request); + } + + @Override + public void createCatalog(Catalog catalog) throws AlreadyExistsException, InvalidObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Catalog getCatalog(String catName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getCatalogs() throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropCatalog(String catName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getDatabases(String catName, String databasePattern) throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getAllDatabases(String catName) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTables(String catName, String dbName, String tablePattern) throws + MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTables(String catName, String dbName, String tablePattern, + TableType tableType) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getMaterializedViewsForRewriting(String catName, String dbName) throws + MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getAllTables(String catName, String dbName) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List listTableNamesByFilter(String catName, String dbName, String filter, + int maxTables) throws TException, + InvalidOperationException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable, boolean ifPurge) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void truncateTable(String catName, String dbName, String tableName, + List partNames) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean tableExists(String catName, String dbName, String tableName) throws MetaException, + TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public Database getDatabase(String catalogName, String databaseName) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Table getTable(String catName, String dbName, String tableName) throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List
getTableObjectsByName(String catName, String dbName, + List tableNames) throws MetaException, + InvalidOperationException, UnknownDBException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void updateCreationMetadata(String catName, String dbName, String tableName, + CreationMetadata cm) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + List partVals) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + String name) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, + List partVals) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition exchange_partition(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destdb, String destTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List exchange_partitions(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destdb, String destTableName) throws + MetaException, NoSuchObjectException, InvalidObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, String name) throws + MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + List pvals, String userName, + List groupNames) throws MetaException, + UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + int max_parts) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, + int maxParts) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + int max_parts) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws + MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public int getNumPartitionsByFilter(String catName, String dbName, String tableName, + String filter) throws MetaException, NoSuchObjectException, 
+ TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, + String tbl_name, String filter, + int max_parts) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, + List result) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, + List groupNames) throws MetaException, + TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public List getPartitionsByNames(String catName, String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List partialPvals, int maxParts, + String userName, List groupNames) throws + MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public void markPartitionForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws MetaException, + NoSuchObjectException, TException, UnknownTableException, UnknownDBException, + UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws MetaException, + NoSuchObjectException, TException, UnknownTableException, UnknownDBException, + UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_table(String catName, String dbName, String tblName, Table newTable, + EnvironmentContext envContext) throws InvalidOperationException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropDatabase(String catName, String dbName, boolean deleteData, + boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterDatabase(String catName, String dbName, Database newDb) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, boolean deleteData) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List 
dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + PartitionDropOptions options) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, String name, + boolean deleteData) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_partitions(String catName, String dbName, String tblName, + List newParts, + EnvironmentContext environmentContext) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void renamePartition(String catName, String dbname, String tableName, + List part_vals, Partition newPart) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getFields(String catName, String db, String tableName) throws + MetaException, TException, UnknownTableException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getSchema(String catName, String db, String tableName) throws + MetaException, TException, UnknownTableException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTableColumnStatistics(String catName, String dbName, + String tableName, + List colNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Map> getPartitionColumnStatistics(String catName, + String dbName, + String tableName, + List partNames, + List colNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) throws + NoSuchObjectException, MetaException, InvalidObjectException, TException, + InvalidInputException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, + MetaException, InvalidObjectException, TException, InvalidInputException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterFunction(String catName, String dbName, String funcName, + Function newFunction) throws InvalidObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Function getFunction(String catName, String dbName, String funcName) throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getFunctions(String catName, String dbName, String pattern) throws + MetaException, TException { + throw new UnsupportedOperationException(); + } + + 
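All of these catalog-aware overloads are intentionally unsupported in the pre-catalog client. A minimal sketch of how calling code might bridge the old and new APIs, using only IMetaStoreClient methods that appear in this patch (the helper name is hypothetical, not part of this change):

    static Table getTableCompat(IMetaStoreClient msc, String catName, String dbName,
                                String tblName) throws TException {
      try {
        // Catalog-aware overload introduced in this patch.
        return msc.getTable(catName, dbName, tblName);
      } catch (UnsupportedOperationException e) {
        // Pre-catalog clients reject the catalog-aware overloads; fall back to the
        // legacy call, which the server resolves against its default catalog.
        return msc.getTable(dbName, tblName);
      }
    }
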
@Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName) throws MetaException, NoSuchObjectException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void createISchema(ISchema schema) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterISchema(String catName, String dbName, String schemaName, + ISchema newSchema) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public ISchema getISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void addSchemaVersion(SchemaVersion schemaVersion) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, + int version) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaVersion getSchemaLatestVersion(String catName, String dbName, + String schemaName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getSchemaAllVersions(String catName, String dbName, + String schemaName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropSchemaVersion(String catName, String dbName, String schemaName, + int version) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, + String serdeName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void setSchemaVersionState(String catName, String dbName, String schemaName, int version, + SchemaVersionState state) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void addSerDe(SerDeInfo serDeInfo) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SerDeInfo getSerDe(String serDeName) throws TException { + throw new UnsupportedOperationException(); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index d7a40b608f..fdb0dc4413 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -88,13 +88,13 @@ public static void resetGetNextNotificationBehaviour(){ // ObjectStore methods to be overridden with injected behavior @Override - public Table getTable(String dbName, String tableName) throws MetaException { - return getTableModifier.apply(super.getTable(dbName, tableName)); + public Table 
getTable(String catName, String dbName, String tableName) throws MetaException { + return getTableModifier.apply(super.getTable(catName, dbName, tableName)); } @Override - public List listPartitionNames(String dbName, String tableName, short max) throws MetaException { - return listPartitionNamesModifier.apply(super.listPartitionNames(dbName, tableName, max)); + public List listPartitionNames(String catName, String dbName, String tableName, short max) throws MetaException { + return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max)); } @Override diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 60fcb86bb1..1d12cf96ce 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hive.metastore; +import java.io.File; import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -30,9 +32,12 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + public class MetaStoreTestUtils { private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class); public static final int RETRY_COUNT = 10; @@ -220,4 +225,28 @@ public static void setConfForStandloneMode(Configuration conf) { DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class); } } + + + public static String getTestWarehouseDir(String name) { + File dir = new File(System.getProperty("java.io.tmpdir"), name); + dir.deleteOnExit(); + return dir.getAbsolutePath(); + } + + /** + * There is no cascade option for dropping a catalog for security reasons. But this is + * inconvenient in tests, so this method does it. 
+ * @param client metastore client + * @param catName catalog to drop, cannot be the default catalog + * @throws TException from underlying client calls + */ + public static void dropCatalogCascade(IMetaStoreClient client, String catName) throws TException { + if (catName != null && !catName.equals(DEFAULT_CATALOG_NAME)) { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, false, true); + } + client.dropCatalog(catName); + } + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java index b95f1f23a5..75ab4e01ee 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java @@ -41,6 +41,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestAggregateStatsCache { static String DB_NAME = "db"; @@ -117,11 +119,11 @@ public void tearDown() { @Test public void testCacheKey() { - Key k1 = new Key("db", "tbl1", "col"); - Key k2 = new Key("db", "tbl1", "col"); + Key k1 = new Key("cat", "db", "tbl1", "col"); + Key k2 = new Key("cat", "db", "tbl1", "col"); // k1 equals k2 Assert.assertEquals(k1, k2); - Key k3 = new Key("db", "tbl2", "col"); + Key k3 = new Key("cat", "db", "tbl2", "col"); // k1 not equals k3 Assert.assertNotEquals(k1, k3); } @@ -140,16 +142,16 @@ public void testBasicAddAndGet() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache the dummy colstats for these 10 partitions - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Now get from cache - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNotNull(aggrStatsCached); ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats(); Assert.assertEquals(aggrColStats, aggrColStatsCached); // Now get a non-existent entry - aggrStatsCached = cache.get("dbNotThere", tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, "dbNotThere", tblName, colName, partNames); Assert.assertNull(aggrStatsCached); } @@ -167,25 +169,25 @@ public void testAddGetWithVariance() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Now prepare partnames with only 5 partitions: [tab1part1...tab1part5] partNames = preparePartNames(tables.get(0), 1, 5); // This get should fail because its variance ((10-5)/5) is way past MAX_VARIANCE (0.5) - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); // Now prepare partnames with 10 partitions: 
[tab1part11...tab1part20], but with no overlap partNames = preparePartNames(tables.get(0), 11, 20); // This get should fail because its variance ((10-0)/10) is way past MAX_VARIANCE (0.5) - aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); // Now prepare partnames with 9 partitions: [tab1part1...tab1part8], which are contained in the // object that we added to the cache partNames = preparePartNames(tables.get(0), 1, 8); // This get should succeed because its variance ((10-9)/9) is within MAX_VARIANCE (0.5) - aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNotNull(aggrStatsCached); ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats(); Assert.assertEquals(aggrColStats, aggrColStatsCached); @@ -206,13 +208,13 @@ public void testTimeToLive() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Sleep for 3 seconds Thread.sleep(3000); // Get should fail now (since TTL is 2s) and we've snoozed for 3 seconds - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java new file mode 100644 index 0000000000..dfe05e98f0 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; +import org.junit.After; + +/** + * This tests metastore client calls that do not specify a catalog but with the config on the + * client set to go to a non-default catalog. + */ +public class TestCatalogNonDefaultClient extends TestNonCatCallsWithCatalog { + + final private String catName = "non_default_catalog"; + private String catLocation; + + @After + public void dropCatalog() throws TException { + MetaStoreTestUtils.dropCatalogCascade(client, catName); + } + + @Override + protected IMetaStoreClient getClient() throws Exception { + + Configuration svrConf = new Configuration(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), + svrConf); + // Only set the default catalog on the client. + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName); + IMetaStoreClient client = new HiveMetaStoreClient(conf); + assert !client.isLocalMetaStore(); + // Don't make any calls but catalog calls until the catalog has been created, as we just told + // the client to direct all calls to a catalog that does not yet exist. + catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName); + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(catLocation) + .build(); + client.createCatalog(cat); + return client; + } + + @Override + protected String expectedCatalog() { + return catName; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return catLocation; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java new file mode 100644 index 0000000000..13c8723b53 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; +import org.junit.After; + +/** + * This tests metastore client calls that do not specify a catalog but with the config on the + * server set to go to a non-default catalog. + */ +public class TestCatalogNonDefaultSvr extends TestNonCatCallsWithCatalog { + + final private String catName = "non_default_svr_catalog"; + private String catLocation; + private IMetaStoreClient catalogCapableClient; + + @After + public void dropCatalog() throws TException { + MetaStoreTestUtils.dropCatalogCascade(catalogCapableClient, catName); + catalogCapableClient.close(); + } + + @Override + protected IMetaStoreClient getClient() throws Exception { + // Separate client to create the catalog + catalogCapableClient = new HiveMetaStoreClient(conf); + catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName); + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(catLocation) + .build(); + catalogCapableClient.createCatalog(cat); + catalogCapableClient.close(); + + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName); + return new HiveMetaStoreClientPreCatalog(conf); + } + + @Override + protected String expectedCatalog() { + return catName; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return catLocation; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java new file mode 100644 index 0000000000..bb57b85d17 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.api.MetaException; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + +/** + * This tests calls with an older client, to make sure that if the client supplies no catalog + * information the server still does the right thing. It assumes the default catalog. + */ +public class TestCatalogOldClient extends TestNonCatCallsWithCatalog { + + @Override + protected IMetaStoreClient getClient() throws MetaException { + return new HiveMetaStoreClientPreCatalog(conf); + } + + @Override + protected String expectedCatalog() { + return DEFAULT_CATALOG_NAME; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return new Warehouse(conf).getWhRoot().toUri().getPath(); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index ea5dd3c390..7dc69bc4e9 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -75,11 +75,12 @@ public Database filterDatabase(Database dataBase) throws NoSuchObjectException { } @Override - public List filterTableNames(String dbName, List tableList) throws MetaException { + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { if (blockResults) { return new ArrayList<>(); } - return super.filterTableNames(dbName, tableList); + return super.filterTableNames(catName, dbName, tableList); } @Override @@ -124,12 +125,12 @@ public Partition filterPartition(Partition partition) throws NoSuchObjectExcepti } @Override - public List filterPartitionNames(String dbName, String tblName, + public List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException { if (blockResults) { return new ArrayList<>(); } - return super.filterPartitionNames(dbName, tblName, partitionNames); + return super.filterPartitionNames(catName, dbName, tblName, partitionNames); } } @@ -159,36 +160,32 @@ public static void setUp() throws Exception { msc.dropDatabase(DBNAME2, true, true, true); Database db1 = new DatabaseBuilder() .setName(DBNAME1) - .build(); - msc.createDatabase(db1); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); Database db2 = new DatabaseBuilder() .setName(DBNAME2) - .build(); - msc.createDatabase(db2); - Table tab1 = new TableBuilder() + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); + new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .build(); - msc.createTable(tab1); + .create(msc, conf); Table tab2 = new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .build(); - msc.createTable(tab2); - Partition part1 = new PartitionBuilder() - .fromTable(tab2) + .create(msc, conf); + new PartitionBuilder() + .inTable(tab2) .addValue("value1") - .build(); - msc.add_partition(part1); - Partition part2 = new 
PartitionBuilder() - .fromTable(tab2) + .addToTable(msc, conf); + new PartitionBuilder() + .inTable(tab2) .addValue("value2") - .build(); - msc.add_partition(part2); + .addToTable(msc, conf); } @AfterClass diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java index ba8c1a0038..adc82b0b9c 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java @@ -18,17 +18,24 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; import java.util.Arrays; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; + @Category(MetastoreUnitTest.class) public class TestHiveAlterHandler { + private Configuration conf = MetastoreConf.newMetastoreConf(); + @Test public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException { FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment"); @@ -50,8 +57,9 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); } @@ -76,9 +84,10 @@ public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjec RawStore msdb = Mockito.mock(RawStore.class); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") ); } @@ -103,8 +112,9 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 7091c5b2f5..9a56c1cb6d 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -178,10 +178,10 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - db = client.getDatabase(dbName); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + Database db = client.getDatabase(dbName); Path dbPath = new Path(db.getLocationUri()); FileSystem fs = FileSystem.get(dbPath.toUri(), conf); @@ -209,9 +209,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1")) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -709,19 +707,17 @@ public void testAlterViewParititon() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -804,10 +800,10 @@ public void testAlterPartition() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -819,9 +815,7 @@ public void testAlterPartition() throws Throwable { .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -884,10 +878,10 @@ public void testRenamePartition() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Rename Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Rename Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -896,9 +890,7 @@ public void testRenamePartition() throws Throwable { .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -988,7 +980,7 @@ public void testDatabase() throws Throwable { Database db = new 
DatabaseBuilder() .setName(TEST_DB1_NAME) .setOwnerName(SecurityUtils.getUser()) - .build(); + .build(conf); Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName()); client.createDatabase(db); @@ -1000,9 +992,10 @@ public void testDatabase() throws Throwable { warehouse.getDatabasePath(db).toString(), db.getLocationUri()); assertEquals(db.getOwnerName(), SecurityUtils.getUser()); assertEquals(db.getOwnerType(), PrincipalType.USER); - Database db2 = new Database(); - db2.setName(TEST_DB2_NAME); - client.createDatabase(db2); + assertEquals(Warehouse.DEFAULT_CATALOG_NAME, db.getCatalogName()); + Database db2 = new DatabaseBuilder() + .setName(TEST_DB2_NAME) + .create(client, conf); db2 = client.getDatabase(TEST_DB2_NAME); @@ -1041,15 +1034,16 @@ public void testDatabaseLocationWithPermissionProblems() throws Exception { silentDropDatabase(TEST_DB1_NAME); - Database db = new Database(); - db.setName(TEST_DB1_NAME); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_"; FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.mkdirs( new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 0)); - db.setLocationUri(dbLocation); + Database db = new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .build(conf); boolean createFailed = false; @@ -1081,14 +1075,14 @@ public void testDatabaseLocation() throws Throwable { // clear up any existing databases silentDropDatabase(TEST_DB1_NAME); - Database db = new Database(); - db.setName(TEST_DB1_NAME); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_"; - db.setLocationUri(dbLocation); - client.createDatabase(db); + new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .create(client, conf); - db = client.getDatabase(TEST_DB1_NAME); + Database db = client.getDatabase(TEST_DB1_NAME); assertEquals("name of returned db is different from that of inserted db", TEST_DB1_NAME, db.getName()); @@ -1106,14 +1100,15 @@ public void testDatabaseLocation() throws Throwable { } assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist); - db = new Database(); - db.setName(TEST_DB1_NAME); dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_"; FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.createNewFile(new Path(dbLocation)); fs.deleteOnExit(new Path(dbLocation)); - db.setLocationUri(dbLocation); + db = new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .build(conf); boolean createFailed = false; try { @@ -1247,9 +1242,9 @@ public void testSimpleTable() throws Exception { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropType(typeName); Type typ1 = new Type(); @@ -1268,9 +1263,7 @@ public void testSimpleTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1", "Use this for comments etc") - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1397,7 +1390,8 @@ public void testSimpleTable() throws Exception { udbe = e; } assertNotNull(udbe); - assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist")); + assertTrue("DB not found", + 
udbe.getMessage().contains("not find database hive.db_that_doesnt_exist")); udbe = null; try { @@ -1498,9 +1492,9 @@ public void testColumnStatistics() throws Throwable { try { cleanUp(dbName, tblName, typeName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true); // Create a ColumnStatistics Obj @@ -1658,17 +1652,16 @@ public void testGetSchemaWithNoClassDefFoundError() throws TException { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME, "") .setSerdeLib("no.such.class") - .build(); - client.createTable(tbl); + .create(client, conf); client.getSchema(dbName, tblName); } @@ -1683,9 +1676,9 @@ public void testAlterTable() throws Exception { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); ArrayList invCols = new ArrayList<>(2); invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, "")); @@ -1695,7 +1688,7 @@ public void testAlterTable() throws Exception { .setDbName(dbName) .setTableName(invTblName) .setCols(invCols) - .build(); + .build(conf); boolean failed = false; try { @@ -1834,9 +1827,9 @@ public void testComplexTable() throws Exception { try { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropType(typeName); Type typ1 = new Type(); @@ -1857,9 +1850,7 @@ public void testComplexTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1","Use this for comments etc") - .build(); - - client.createTable(tbl); + .create(client, conf); Table tbl2 = client.getTable(dbName, tblName); assertEquals(tbl2.getDbName(), dbName); @@ -1920,22 +1911,21 @@ public void testTableDatabase() throws Exception { try { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_"; - db.setLocationUri(dbLocation); - client.createDatabase(db); - db = client.getDatabase(dbName); + new DatabaseBuilder() + .setName(dbName) + .setLocation(dbLocation) + .create(client, conf); + Database db = client.getDatabase(dbName); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName_1) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); tbl = client.getTable(dbName, tblName_1); Path path = new Path(tbl.getSd().getLocation()); @@ -2014,9 +2004,9 @@ public void testPartitionFilter() throws Exception { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -2026,8 +2016,7 @@ public void testPartitionFilter() throws Exception { .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", 
ColumnType.STRING_TYPE_NAME) .addPartCol("p3", ColumnType.INT_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); tbl = client.getTable(dbName, tblName); @@ -2188,9 +2177,9 @@ public void testFilterSinglePartition() throws Exception { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -2198,8 +2187,7 @@ public void testFilterSinglePartition() throws Exception { .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); tbl = client.getTable(dbName, tblName); @@ -2249,9 +2237,8 @@ public void testFilterLastPartition() throws Exception { .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); tbl = client.getTable(dbName, tblName); add_partition(client, tbl, vals, "part1"); @@ -2334,10 +2321,10 @@ public void testTableFilter() throws Exception { client.dropTable(dbName, tableName2); client.dropTable(dbName, tableName3); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table table1 = createTableForTestFilter(dbName,tableName1, owner1, lastAccessTime1, true); Table table2 = createTableForTestFilter(dbName,tableName2, owner2, lastAccessTime2, true); @@ -2475,8 +2462,7 @@ private Table createTableForTestFilter(String dbName, String tableName, String o .setTableParams(tableParams) .setOwner(owner) .setLastAccessTime(lastAccessTime) - .build(); - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2508,8 +2494,7 @@ public void testConcurrentMetastores() throws Exception { .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) - .build(); - client.createTable(tbl1); + .create(client, conf); // get the table from the client, verify the name is correct Table tbl2 = client.getTable(dbName, tblName); @@ -2692,10 +2677,9 @@ private void cleanUp(String dbName, String tableName, String typeName) throws Ex private Database createDb(String dbName) throws Exception { if(null == dbName) { return null; } - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - return db; + return new DatabaseBuilder() + .setName(dbName) + .create(client, conf); } private Type createType(String typeName, Map fields) throws Throwable { @@ -2717,13 +2701,12 @@ private Type createType(String typeName, Map fields) throws Thro */ private void createTable(String dbName, String tableName) throws TException { - Table t = new TableBuilder() + new TableBuilder() .setDbName(dbName) .setTableName(tableName) .addCol("foo", "string") .addCol("bar", "string") - .build(); - client.createTable(t); + .create(client, conf); } private List createPartitions(String dbName, Table tbl, @@ -2765,8 +2748,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) 
.addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2797,12 +2779,12 @@ public void testDBOwnerChange() throws TException { final String role1 = "role1"; silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setOwnerName(user1); - db.setOwnerType(PrincipalType.USER); + Database db = new DatabaseBuilder() + .setName(dbName) + .setOwnerName(user1) + .setOwnerType(PrincipalType.USER) + .create(client, conf); - client.createDatabase(db); checkDbOwnerType(dbName, user1, PrincipalType.USER); db.setOwnerName(user2); @@ -2827,9 +2809,9 @@ public void testGetTableObjects() throws Exception { // Setup silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); for (String tableName : tableNames) { createTable(dbName, tableName); } @@ -2853,12 +2835,12 @@ public void testDBLocationChange() throws IOException, TException { String defaultUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/default_location.db"; String newUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/new_location.db"; - Database db = new Database(); - db.setName(dbName); - db.setLocationUri(defaultUri); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setLocation(defaultUri) + .create(client, conf); - db = client.getDatabase(dbName); + Database db = client.getDatabase(dbName); assertEquals("Incorrect default location of the database", warehouse.getDnsPath(new Path(defaultUri)).toString(), db.getLocationUri()); @@ -2981,19 +2963,18 @@ public void testValidateTableCols() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Validate Table Columns test"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Validate Table Columns test") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); if (isThriftClient) { tbl = client.getTable(dbName, tblName); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index b2d1d5a679..df83171648 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy; @@ -121,11 +122,9 @@ private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exceptio true // Cascade. ); - hmsc.createDatabase(new Database(dbName, - "", // Description. 
- null, // Location. - null // Parameters. - )); + new DatabaseBuilder() + .setName(dbName) + .create(hmsc, conf); } // Get partition-path. For grid='XYZ', place the partition outside the table-path. diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java index 1b30090b8e..3d48c5f542 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; @@ -36,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersionState; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SerdeType; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder; import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder; @@ -64,6 +66,7 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; // This does the testing using a remote metastore, as that finds more issues in thrift @@ -74,11 +77,12 @@ private static Map preEvents; private static IMetaStoreClient client; + private static Configuration conf; @BeforeClass public static void startMetastore() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetaStoreTestUtils.setConfForStandloneMode(conf); MetastoreConf.setClass(conf, ConfVars.EVENT_LISTENERS, SchemaEventListener.class, MetaStoreEventListener.class); @@ -101,7 +105,7 @@ public void newMaps() { @Test(expected = NoSuchObjectException.class) public void getNonExistentSchema() throws TException { - client.getISchema(DEFAULT_DATABASE_NAME, "no.such.schema"); + client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema"); } @Test @@ -124,11 +128,13 @@ public void iSchema() throws TException { Assert.assertEquals(1, (int)events.get(EventMessage.EventType.CREATE_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.CREATE_ISCHEMA)); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + schema = client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA)); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(DEFAULT_CATALOG_NAME, schema.getCatName()); + Assert.assertEquals(DEFAULT_DATABASE_NAME, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel()); 
Assert.assertFalse(schema.isCanEvolve()); @@ -142,12 +148,12 @@ public void iSchema() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - client.alterISchema(DEFAULT_DATABASE_NAME, schemaName, schema); + client.alterISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, schema); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_ISCHEMA)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_ISCHEMA)); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + schema = client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA)); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -158,12 +164,12 @@ public void iSchema() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - client.dropISchema(DEFAULT_DATABASE_NAME, schemaName); + client.dropISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_ISCHEMA)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.DROP_ISCHEMA)); try { - client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -172,11 +178,18 @@ public void iSchema() throws TException { @Test public void iSchemaOtherDatabase() throws TException { + String catName = "other_cat"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); String schemaGroup = "group1"; @@ -184,7 +197,7 @@ public void iSchemaOtherDatabase() throws TException { ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .setCompatibility(SchemaCompatibility.FORWARD) .setValidationLevel(SchemaValidation.LATEST) .setCanEvolve(false) @@ -193,10 +206,11 @@ public void iSchemaOtherDatabase() throws TException { .build(); client.createISchema(schema); - schema = client.getISchema(dbName, schemaName); + schema = client.getISchema(catName, dbName, schemaName); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(catName, schema.getCatName()); Assert.assertEquals(dbName, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel()); @@ -211,12 +225,13 @@ public void iSchemaOtherDatabase() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - client.alterISchema(dbName, schemaName, schema); + client.alterISchema(catName, dbName, schemaName, schema); - schema = client.getISchema(dbName, schemaName); + schema = client.getISchema(catName, dbName, 
schemaName); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(catName, schema.getCatName()); Assert.assertEquals(dbName, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel()); @@ -224,9 +239,9 @@ public void iSchemaOtherDatabase() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - client.dropISchema(dbName, schemaName); + client.dropISchema(catName, dbName, schemaName); try { - client.getISchema(dbName, schemaName); + client.getISchema(catName, dbName, schemaName); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -252,7 +267,6 @@ public void schemaAlreadyExists() throws TException { .build(); client.createISchema(schema); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType()); @@ -273,19 +287,18 @@ public void alterNonExistentSchema() throws TException { .setName(schemaName) .setDescription("a new description") .build(); - client.alterISchema(DEFAULT_DATABASE_NAME, schemaName, schema); + client.alterISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, schema); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchema() throws TException { - client.dropISchema(DEFAULT_DATABASE_NAME, "no_such_schema"); + client.dropISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no_such_schema"); } @Test(expected = NoSuchObjectException.class) public void createVersionOfNonExistentSchema() throws TException { SchemaVersion schemaVersion = new SchemaVersionBuilder() .setSchemaName("noSchemaOfThisNameExists") - .setDbName(DEFAULT_DATABASE_NAME) .setVersion(1) .addCol("a", ColumnType.STRING_TYPE_NAME) .build(); @@ -333,10 +346,11 @@ public void addSchemaVersion() throws TException { Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); Assert.assertEquals(DEFAULT_DATABASE_NAME, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(DEFAULT_CATALOG_NAME, schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -357,12 +371,12 @@ public void addSchemaVersion() throws TException { Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - client.dropSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + client.dropSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_SCHEMA_VERSION)); Assert.assertEquals(1, 
(int)transactionalEvents.get(EventMessage.EventType.DROP_SCHEMA_VERSION)); try { - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -371,17 +385,24 @@ public void addSchemaVersion() throws TException { @Test public void addSchemaVersionOtherDb() throws TException { + String catName = "other_cat_for_schema_version"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db_for_schema_version"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); int version = 1; ISchema schema = new ISchemaBuilder() - .setDbName(dbName) + .inDb(db) .setSchemaType(SchemaType.AVRO) .setName(schemaName) .build(); @@ -414,10 +435,11 @@ public void addSchemaVersionOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schemaName, version); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, version); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(catName, schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -438,9 +460,9 @@ public void addSchemaVersionOtherDb() throws TException { Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - client.dropSchemaVersion(dbName, schemaName, version); + client.dropSchemaVersion(catName, dbName, schemaName, version); try { - client.getSchemaVersion(dbName, schemaName, version); + client.getSchemaVersion(catName, dbName, schemaName, version); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -484,7 +506,7 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(3, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); Assert.assertEquals(3, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); - schemaVersion = client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, schemaName); + schemaVersion = client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(3, schemaVersion.getVersion()); Assert.assertEquals(3, schemaVersion.getColsSize()); List cols = schemaVersion.getCols(); @@ -497,7 +519,7 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - List versions = client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, schemaName); + List versions = client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); Assert.assertEquals(3, versions.size()); 
versions.sort(Comparator.comparingInt(SchemaVersion::getVersion)); @@ -534,7 +556,7 @@ public void nonExistentSchemaVersion() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); } @Test(expected = NoSuchObjectException.class) @@ -545,7 +567,18 @@ public void schemaVersionBogusDb() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaVersion("bogus", schemaName, 1); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, "bogus", schemaName, 1); + } + + @Test(expected = NoSuchObjectException.class) + public void schemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaVersion("bogus", DEFAULT_DATABASE_NAME, schemaName, 1); } @Test(expected = NoSuchObjectException.class) @@ -566,7 +599,7 @@ public void nonExistentSchemaVersionButOtherVersionsExist() throws TException { .build(); client.addSchemaVersion(schemaVersion); - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 2); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 2); } @Test(expected = NoSuchObjectException.class) @@ -577,12 +610,12 @@ public void getLatestSchemaButNoVersions() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, schemaName); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) public void getLatestSchemaNoSuchSchema() throws TException { - client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); } @Test(expected = NoSuchObjectException.class) @@ -593,7 +626,18 @@ public void latestSchemaVersionBogusDb() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaLatestVersion("bogus", schemaName); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, "bogus", schemaName); + } + + @Test(expected = NoSuchObjectException.class) + public void latestSchemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaLatestVersion("bogus", DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) @@ -604,12 +648,12 @@ public void getAllSchemaButNoVersions() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, schemaName); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) public void getAllSchemaNoSuchSchema() throws TException { - client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); } @Test(expected = NoSuchObjectException.class) @@ -620,7 +664,18 @@ public void allSchemaVersionBogusDb() throws TException { .setName(schemaName) 
.build(); client.createISchema(schema); - client.getSchemaAllVersions("bogus", schemaName); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, "bogus", schemaName); + } + + @Test(expected = NoSuchObjectException.class) + public void allSchemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaAllVersions("bogus", DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = AlreadyExistsException.class) @@ -648,7 +703,7 @@ public void addDuplicateSchemaVersion() throws TException { @Test(expected = NoSuchObjectException.class) public void mapSerDeNoSuchSchema() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, uniqueSchemaName(), 1, serDeInfo.getName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, uniqueSchemaName(), 1, serDeInfo.getName()); } @Test(expected = NoSuchObjectException.class) @@ -659,7 +714,7 @@ public void mapSerDeNoSuchSchemaVersion() throws TException { .setName(uniqueSchemaName()) .build(); client.createISchema(schema); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), 3, serDeInfo.getName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 3, serDeInfo.getName()); } @Test(expected = NoSuchObjectException.class) @@ -676,7 +731,7 @@ public void mapNonExistentSerdeToSchemaVersion() throws TException { .addCol("x", ColumnType.BOOLEAN_TYPE_NAME) .build(); client.addSchemaVersion(schemaVersion); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), uniqueSerdeName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), uniqueSerdeName()); } @Test @@ -698,8 +753,8 @@ public void mapSerdeToSchemaVersion() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion()); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); // Create schema with a serde, then remap it @@ -713,27 +768,34 @@ public void mapSerdeToSchemaVersion() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2); Assert.assertEquals(serDeName, schemaVersion.getSerDe().getName()); serDeInfo = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), 2, serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2); + 
client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2, serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); } @Test public void mapSerdeToSchemaVersionOtherDb() throws TException { + String catName = "other_cat_for_map_to"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "map_other_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) - .setDbName(dbName) + .inDb(db) .setName(uniqueSchemaName()) .build(); client.createISchema(schema); @@ -749,8 +811,8 @@ public void mapSerdeToSchemaVersionOtherDb() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(dbName, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), schemaVersion.getVersion()); + client.mapSchemaVersionToSerde(catName, dbName, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), schemaVersion.getVersion()); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); // Create schema with a serde, then remap it @@ -764,13 +826,13 @@ public void mapSerdeToSchemaVersionOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), 2); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), 2); Assert.assertEquals(serDeName, schemaVersion.getSerDe().getName()); serDeInfo = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(dbName, schema.getName(), 2, serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), 2); + client.mapSchemaVersionToSerde(catName, dbName, schema.getName(), 2, serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), 2); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); } @@ -811,7 +873,7 @@ public void noSuchSerDe() throws TException { @Test(expected = NoSuchObjectException.class) public void setVersionStateNoSuchSchema() throws TException { - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, "no.such.schema", 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema", 1, SchemaVersionState.INITIATED); } @Test(expected = NoSuchObjectException.class) @@ -822,7 +884,7 @@ public void setVersionStateNoSuchVersion() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); } @Test @@ -841,37 +903,44 @@ public void setVersionState() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = 
client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertNull(schemaVersion.getState()); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.REVIEWED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.REVIEWED); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); } @Test public void setVersionStateOtherDb() throws TException { + String catName = "other_cat_for_set_version"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db_set_state"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); client.createISchema(schema); @@ -882,27 +951,27 @@ public void setVersionStateOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertNull(schemaVersion.getState()); - client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(catName, dbName, schemaName, 1, SchemaVersionState.INITIATED); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); - client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.REVIEWED); + client.setSchemaVersionState(catName, dbName, schemaName, 1, 
SchemaVersionState.REVIEWED); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchemaVersion() throws TException { - client.dropSchemaVersion(DEFAULT_DATABASE_NAME, "ther is no schema named this", 23); + client.dropSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "ther is no schema named this", 23); } @Test @@ -910,8 +979,7 @@ public void schemaQuery() throws TException { String dbName = "schema_query_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .create(client, conf); String schemaName1 = uniqueSchemaName(); ISchema schema1 = new ISchemaBuilder() diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java index 42df9c22d9..1560d052cc 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.util.StringUtils; @@ -75,13 +76,9 @@ public void testNoTimeout() throws Exception { String dbName = "db"; client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); - try { - client.createDatabase(db); - } catch (MetaException e) { - Assert.fail("should not throw timeout exception: " + e.getMessage()); - } + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropDatabase(dbName, true, true); } @@ -93,8 +90,9 @@ public void testTimeout() throws Exception { String dbName = "db"; client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(conf); try { client.createDatabase(db); Assert.fail("should throw timeout exception."); @@ -114,8 +112,9 @@ public void testResetTimeout() throws Exception { // no timeout before reset client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(conf); try { client.createDatabase(db); } catch (MetaException e) { diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index d53a606d18..38b3f6e531 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -46,6 +46,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.junit.Assert.assertEquals; /** @@ -85,6 +86,7 @@ public void setUp() throws Exception { envContext = new EnvironmentContext(envProperties); db.setName(dbName); + db.setCatalogName(DEFAULT_CATALOG_NAME); table = new TableBuilder() .setDbName(dbName) @@ -93,13 +95,13 @@ public void setUp() throws Exception { .addPartCol("b", "string") .addCol("a", "string") .addCol("b", "string") - .build(); + .build(conf); partition = new PartitionBuilder() - .fromTable(table) + .inTable(table) .addValue("2011") - .build(); + .build(conf); DummyListener.notifyList.clear(); } @@ -171,7 +173,7 @@ public void testEnvironmentContext() throws Exception { assert dropPartByNameEvent.getStatus(); assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext()); - msc.dropTable(dbName, tblName, true, false, envContext); + msc.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName, true, false, envContext); listSize++; assertEquals(notifyList.size(), listSize); DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index b477088709..00fae25be6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -63,8 +63,7 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { msc.dropDatabase(dbName, true, true, true); Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .create(msc, conf); final String tableName = "tmptbl"; msc.dropTable(dbName, tableName, true, true); @@ -73,13 +72,12 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { .setTableName(tableName) .addCol("a", "string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); Partition part = new PartitionBuilder() - .fromTable(table) + .inTable(table) .addValue("2011") - .build(); + .build(conf); msc.add_partition(part); Map kvs = new HashMap<>(); kvs.put("b", "'2011'"); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index 1a720fbf51..b919eeffe2 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -72,8 +72,8 @@ public void testEndFunctionListener() throws Exception { Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); try { msc.getDatabase("UnknownDB"); @@ -91,13 +91,12 @@ public void testEndFunctionListener() throws Exception { assertEquals(context.getInputTableName(), null); String unknownTable = "UnknownTable"; - Table table = new TableBuilder() - .setDbName(db) + new TableBuilder() + .inDb(db) .setTableName(tblName) .addCol("a", 
"string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); try { msc.getTable(dbName, unknownTable); } catch (Exception e1) { diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index fb7f940b1e..fb4a761c28 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -201,13 +201,12 @@ public void testListener() throws Exception { assertEquals(notifyList.size(), listSize); assertEquals(preNotifyList.size(), listSize); - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .create(msc, conf); listSize++; PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1)); - db = msc.getDatabase(dbName); + Database db = msc.getDatabase(dbName); assertEquals(listSize, notifyList.size()); assertEquals(listSize + 1, preNotifyList.size()); validateCreateDb(db, preDbEvent.getDatabase()); @@ -217,12 +216,11 @@ public void testListener() throws Exception { validateCreateDb(db, dbEvent.getDatabase()); Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); @@ -234,18 +232,17 @@ public void testListener() throws Exception { validateCreateTable(tbl, tblEvent.getTable()); - Partition part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("2011") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize++; assertEquals(notifyList.size(), listSize); PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); Assert.assertTrue(partEvent.getStatus()); - part = msc.getPartition("hive2038", "tmptbl", "b=2011"); + Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011"); Partition partAdded = partEvent.getPartitionIterator().next(); validateAddPartition(part, partAdded); validateTableInAddPartition(tbl, partEvent.getTable()); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index a91d1c8964..546422d476 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -39,7 +39,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import junit.framework.TestCase; import org.junit.experimental.categories.Category; /** @@ -78,8 +77,8 @@ public void testEventStatus() throws Exception { String dbName = "tmpDb"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); listSize += 1; 
notifyList = DummyListener.notifyList; @@ -88,22 +87,20 @@ public void testEventStatus() throws Exception { String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit"; Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tableName) .addCol("id", "int") .addPartCol("ds", "string") - .build(); - msc.createTable(table); + .create(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); assertTrue(DummyListener.getLastEvent().getStatus()); - Partition part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("foo1") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); @@ -111,11 +108,10 @@ public void testEventStatus() throws Exception { DummyRawStoreControlledCommit.setCommitSucceed(false); - part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("foo2") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java index 6d3f68c0f0..7a871e1458 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java @@ -31,6 +31,7 @@ import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -82,7 +83,7 @@ public void testCleanerScenario1() throws Exception { when(mv1.getDbName()).thenReturn(DB_NAME); when(mv1.getTableName()).thenReturn(MV_NAME_1); CreationMetadata mockCM1 = new CreationMetadata( - DB_NAME, MV_NAME_1, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -115,7 +116,7 @@ public void testCleanerScenario1() throws Exception { when(mv2.getDbName()).thenReturn(DB_NAME); when(mv2.getTableName()).thenReturn(MV_NAME_2); CreationMetadata mockCM2 = new CreationMetadata( - DB_NAME, MV_NAME_2, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -222,7 +223,7 @@ public void testCleanerScenario2() throws Exception { when(mv1.getDbName()).thenReturn(DB_NAME); when(mv1.getTableName()).thenReturn(MV_NAME_1); CreationMetadata mockCM1 = new CreationMetadata( - DB_NAME, MV_NAME_1, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -255,7 +256,7 @@ public void testCleanerScenario2() throws Exception { when(mv2.getDbName()).thenReturn(DB_NAME); when(mv2.getTableName()).thenReturn(MV_NAME_2); CreationMetadata mockCM2 = new CreationMetadata( - DB_NAME, MV_NAME_2, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." 
+ TBL_NAME_2)); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java new file mode 100644 index 0000000000..55ef885aec --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java @@ -0,0 +1,1126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLCheckConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLDefaultConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLForeignKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLNotNullConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLUniqueConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.net.URI; +import 
java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +public abstract class TestNonCatCallsWithCatalog { + + private static final String OTHER_DATABASE = "non_cat_other_db"; + private Table[] testTables = new Table[6]; + private static final String TEST_FUNCTION_CLASS = + "org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper"; + + protected Configuration conf; + + protected IMetaStoreClient client; + protected abstract IMetaStoreClient getClient() throws Exception; + protected abstract String expectedCatalog(); + protected abstract String expectedBaseDir() throws MetaException; + + @Before + public void setUp() throws Exception { + conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); + + // Get new client + client = getClient(); + + List databases = client.getAllDatabases(); + for (String db : databases) { + if (!DEFAULT_DATABASE_NAME.equals(db)) { + client.dropDatabase(db, true, true, true); + } + } + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + testTables[0] = + new TableBuilder() + .setTableName("test_table") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .create(client, conf); + + testTables[1] = + new TableBuilder() + .setTableName("test_view") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .setType("VIRTUAL_VIEW") + .create(client, conf); + + testTables[2] = + new TableBuilder() + .setTableName("test_table_to_find_1") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .create(client, conf); + + testTables[3] = + new TableBuilder() + .setTableName("test_partitioned_table") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addPartCol("test_part_col", "int") + .create(client, conf); + + testTables[4] = + new TableBuilder() + .setTableName("external_table_for_test") + .addCol("test_col", "int") + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("/external/table_dir")) + .addTableParam("EXTERNAL", "TRUE") + .setType("EXTERNAL_TABLE") + .create(client, conf); + + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, conf); + + testTables[5] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table") + .addCol("test_col", "int") + .create(client, conf); + + // Create partitions for the partitioned table + for(int i=0; i < 3; i++) { + new PartitionBuilder() + .inTable(testTables[3]) + .addValue("a" + i) + .addToTable(client, conf); + } + + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void databases() throws TException, URISyntaxException { + String[] dbNames = {"db1", "db9"}; + Database[] dbs = new Database[2]; + // For this one don't specify a location to make sure it gets put in the catalog directory + dbs[0] = new DatabaseBuilder() + .setName(dbNames[0]) + .create(client, conf); + + // For the second one, explicitly set a location to make sure it ends up in the specified place. 
+ String db1Location = MetaStoreTestUtils.getTestWarehouseDir(dbNames[1]); + dbs[1] = new DatabaseBuilder() + .setName(dbNames[1]) + .setLocation(db1Location) + .create(client, conf); + + Database fetched = client.getDatabase(dbNames[0]); + String expectedLocation = new File(expectedBaseDir(), dbNames[0] + ".db").toURI().toString(); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + Assert.assertEquals(expectedLocation, fetched.getLocationUri() + "/"); + String db0Location = new URI(fetched.getLocationUri()).getPath(); + File dir = new File(db0Location); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + fetched = client.getDatabase(dbNames[1]); + Assert.assertEquals(new File(db1Location).toURI().toString(), fetched.getLocationUri() + "/"); + dir = new File(new URI(fetched.getLocationUri()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + Set fetchedDbs = new HashSet<>(client.getAllDatabases()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getDatabases("db*")); + Assert.assertEquals(2, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + client.dropDatabase(dbNames[0], true, false, false); + dir = new File(db0Location); + Assert.assertFalse(dir.exists()); + + client.dropDatabase(dbNames[1], true, false, false); + dir = new File(db1Location); + Assert.assertFalse(dir.exists()); + + fetchedDbs = new HashSet<>(client.getAllDatabases()); + for (String dbName : dbNames) Assert.assertFalse(fetchedDbs.contains(dbName)); + } + + @Test + public void tablesCreateDropAlterTruncate() throws TException, URISyntaxException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + // Make one have a non-standard location + if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])); + // Make one partitioned + if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME); + // Make one a materialized view + /* + // TODO HIVE-18991 + if (i == 3) { + builder.setType(TableType.MATERIALIZED_VIEW.name()) + .setRewriteEnabled(true) + .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); + } + */ + client.createTable(builder.build(conf)); + } + + // Add partitions for the partitioned table + String[] partVals = new String[3]; + Table partitionedTable = client.getTable(dbName, tableNames[2]); + for (int i = 0; i < partVals.length; i++) { + partVals[i] = "part" + i; + new PartitionBuilder() + .inTable(partitionedTable) + .addValue(partVals[i]) + .addToTable(client, conf); + } + + // Get tables, make sure the locations are correct + for (int i = 0; i < tableNames.length; i++) { + Table t = client.getTable(dbName, tableNames[i]); + Assert.assertEquals(expectedCatalog(), t.getCatName()); + String expectedLocation = (i < 1) ? 
+ new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() + : + new File(expectedBaseDir() + File.separatorChar + dbName + ".db", + tableNames[i]).toURI().toString(); + + Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/"); + File dir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + } + + // Make sure getting table in the wrong catalog does not work + try { + Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // test getAllTables + Set fetchedNames = new HashSet<>(client.getAllTables(dbName)); + Assert.assertEquals(tableNames.length, fetchedNames.size()); + for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName)); + + fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME)); + for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName)); + + // test getMaterializedViewsForRewriting + /* TODO HIVE-18991 + List materializedViews = client.getMaterializedViewsForRewriting(dbName); + Assert.assertEquals(1, materializedViews.size()); + Assert.assertEquals(tableNames[3], materializedViews.get(0)); + */ + + fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME)); + Assert.assertFalse(fetchedNames.contains(tableNames[3])); + + // test getTableObjectsByName + List
<Table>
fetchedTables = client.getTableObjectsByName(dbName, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(2, fetchedTables.size()); + Collections.sort(fetchedTables); + Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName()); + Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName()); + + fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(0, fetchedTables.size()); + + // Test altering the table + Table t = client.getTable(dbName, tableNames[0]).deepCopy(); + t.getParameters().put("test", "test"); + client.alter_table(dbName, tableNames[0], t); + t = client.getTable(dbName, tableNames[0]).deepCopy(); + Assert.assertEquals("test", t.getParameters().get("test")); + + // Alter a table in the wrong catalog + try { + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + Assert.fail(); + } catch (InvalidOperationException e) { + // NOP + } + + // Update the metadata for the materialized view + /* TODO HIVE-18991 + CreationMetadata cm = client.getTable(dbName, tableNames[3]).getCreationMetadata(); + cm.addToTablesUsed(dbName + "." + tableNames[1]); + client.updateCreationMetadata(dbName, tableNames[3], cm); + */ + + List partNames = new ArrayList<>(); + for (String partVal : partVals) partNames.add("pcol1=" + partVal); + // Truncate a table + client.truncateTable(dbName, tableNames[0], partNames); + + // Have to do this in reverse order so that we drop the materialized view first. + for (int i = tableNames.length - 1; i >= 0; i--) { + t = client.getTable(dbName, tableNames[i]); + File tableDir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + + if (tableNames[i].equalsIgnoreCase(tableNames[0])) { + client.dropTable(dbName, tableNames[i], false, false); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + } else { + client.dropTable(dbName, tableNames[i]); + Assert.assertFalse(tableDir.exists()); + } + } + Assert.assertEquals(0, client.getAllTables(dbName).size()); + } + + @Test + public void tablesGetExists() throws TException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) + .create(client, conf); + } + + Set tables = new HashSet<>(client.getTables(dbName, "*e_in_other_*")); + Assert.assertEquals(4, tables.size()); + for (String tableName : tableNames) Assert.assertTrue(tables.contains(tableName)); + + List fetchedNames = client.getTables(dbName, "*_3"); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[3], fetchedNames.get(0)); + + Assert.assertTrue("Table exists", client.tableExists(dbName, tableNames[0])); + Assert.assertFalse("Table not exists", client.tableExists(dbName, "non_existing_table")); + } + + @Test + public void tablesList() throws TException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + 
String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + if (i == 0) builder.addTableParam("the_key", "the_value"); + builder.create(client, conf); + } + + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter(dbName, filter, (short)-1); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[0], fetchedNames.get(0)); + } + + @Test + public void getTableMeta() throws TException { + String dbName = "db9"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = {"table_in_other_catalog_1", "table_in_other_catalog_2", "random_name"}; + List expected = new ArrayList<>(tableNames.length); + for (int i = 0; i < tableNames.length; i++) { + client.createTable(new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("id", "int") + .addCol("name", "string") + .build(conf)); + expected.add(new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name())); + } + + List types = Collections.singletonList(TableType.MANAGED_TABLE.name()); + List actual = client.getTableMeta(dbName, "*", types); + Assert.assertEquals(new TreeSet<>(expected), new TreeSet<>(actual)); + + actual = client.getTableMeta("*", "table_*", types); + Assert.assertEquals(expected.subList(0, 2), actual.subList(0, 2)); + + } + + @Test + public void addPartitions() throws TException { + String dbName = "add_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partition(parts[0]); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); + client.add_partitions(Arrays.asList(parts), true, false); + + for (int i = 0; i < parts.length; i++) { + Partition fetched = client.getPartition(dbName, tableName, + Collections.singletonList("a" + i)); + Assert.assertEquals(dbName, fetched.getDbName()); + Assert.assertEquals(tableName, fetched.getTableName()); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + } + + client.dropDatabase(dbName, true, true, true); + } + + @Test + public void getPartitions() throws TException { + String dbName = "get_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + 
.addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + Partition fetched = client.getPartition(dbName, tableName, + Collections.singletonList("a0")); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + fetched = client.getPartition(dbName, tableName, "partcol=a0"); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + List fetchedParts = client.getPartitionsByNames(dbName, tableName, + Arrays.asList("partcol=a0", "partcol=a1")); + Assert.assertEquals(2, fetchedParts.size()); + Set vals = new HashSet<>(fetchedParts.size()); + for (Partition part : fetchedParts) vals.add(part.getValues().get(0)); + Assert.assertTrue(vals.contains("a0")); + Assert.assertTrue(vals.contains("a1")); + + } + + @Test + public void listPartitions() throws TException { + String dbName = "list_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + List fetched = client.listPartitions(dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + fetched = client.listPartitions(dbName, tableName, + Collections.singletonList("a0"), (short)-1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + PartitionSpecProxy proxy = client.listPartitionSpecs(dbName, tableName, -1); + Assert.assertEquals(parts.length, proxy.size()); + Assert.assertEquals(expectedCatalog(), proxy.getCatName()); + + fetched = client.listPartitionsByFilter(dbName, tableName, "partcol=\"a0\"", (short)-1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + proxy = client.listPartitionSpecsByFilter(dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, proxy.size()); + Assert.assertEquals(expectedCatalog(), proxy.getCatName()); + + Assert.assertEquals(1, client.getNumPartitionsByFilter(dbName, tableName, + "partcol=\"a0\"")); + + List names = client.listPartitionNames(dbName, tableName, (short)57); + Assert.assertEquals(parts.length, names.size()); + + names = client.listPartitionNames(dbName, tableName, Collections.singletonList("a0"), + Short.MAX_VALUE); + Assert.assertEquals(1, names.size()); + + PartitionValuesRequest rqst = new PartitionValuesRequest(dbName, + tableName, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + PartitionValuesResponse rsp = client.listPartitionValues(rqst); + Assert.assertEquals(5, rsp.getPartitionValuesSize()); + } + + @Test + public void alterPartitions() throws TException { + String dbName = "alter_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", 
"string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(dbName, tableName, newPart); + + Partition fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(dbName, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere")); + client.alter_partitions(dbName, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a2")); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(dbName, tableName, newPart, ec); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a4")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + + client.dropDatabase(dbName, true, true, true); + } + + @Test + public void dropPartitions() throws TException { + String dbName = "drop_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[2]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + List fetched = client.listPartitions(dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + + Assert.assertTrue(client.dropPartition(dbName, tableName, + Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false))); + try { + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + Assert.assertTrue(client.dropPartition(dbName, tableName, "partcol=a1", true)); + try { + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + } + + @Test + public void primaryKeyAndForeignKey() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[3]; + String constraintName = "othercatfk"; + + // Single column unnamed primary key in default catalog and database + List pk = new 
SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("test_col1") + .build(conf); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addForeignKey(fk); + + PrimaryKeysRequest pkRqst = new PrimaryKeysRequest(parentTable.getDbName(), + parentTable.getTableName()); + pkRqst.setCatName(parentTable.getCatName()); + List pkFetched = client.getPrimaryKeys(pkRqst); + Assert.assertEquals(1, pkFetched.size()); + Assert.assertEquals(expectedCatalog(), pkFetched.get(0).getCatName()); + Assert.assertEquals(parentTable.getDbName(), pkFetched.get(0).getTable_db()); + Assert.assertEquals(parentTable.getTableName(), pkFetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", pkFetched.get(0).getColumn_name()); + Assert.assertEquals(1, pkFetched.get(0).getKey_seq()); + Assert.assertTrue(pkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(pkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(pkFetched.get(0).isRely_cstr()); + Assert.assertEquals(parentTable.getCatName(), pkFetched.get(0).getCatName()); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(), table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + Assert.assertEquals("test_col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getDbName(), table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void notNullConstraint() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List nn = new SQLNotNullConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addNotNullConstraint(nn); + + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), 
fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void uniqueConstraint() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List uc = new SQLUniqueConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addUniqueConstraint(uc); + + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void defaultConstraints() throws TException { + String constraintName = "ocdv"; + // Table in non 'hive' catalog + List dv = new SQLDefaultConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .setDefaultVal("empty") + .build(conf); + client.addDefaultConstraint(dv); + + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("empty", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + 
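A note on the request objects used in these constraint tests: with catalogs, each *ConstraintsRequest carries the catalog name as its first field, so the tests resolve it from the table under test instead of assuming a single namespace. A sketch of the lookup pattern, with dbName and tableName as placeholders:

    // Constraint lookups are catalog-scoped; take the catalog from the table itself.
    Table t = client.getTable(dbName, tableName);
    NotNullConstraintsRequest rqst =
        new NotNullConstraintsRequest(t.getCatName(), t.getDbName(), t.getTableName());
    List<SQLNotNullConstraint> nns = client.getNotNullConstraints(rqst);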
+ client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraints() throws TException { + Table parentTable = testTables[2]; + + + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .addCol("col3", "int") + .addCol("col4", "int") + .addCol("col5", "int") + .addCol("col6", "int") + .build(conf); + + List parentPk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("test_col1") + .build(conf); + client.addPrimaryKey(parentPk); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col2") + .build(conf); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(parentPk) + .onTable(table) + .addColumn("col1") + .build(conf); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col3") + .setDefaultVal(0) + .build(conf); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col4") + .build(conf); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col5") + .build(conf); + + List cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col6") + .setCheckExpression("> 0") + .build(conf); + + client.createTableWithConstraints(table, pk, fk, uc, nn, dv, cc); + + PrimaryKeysRequest pkRqst = new PrimaryKeysRequest(parentTable.getDbName(), + parentTable.getTableName()); + pkRqst.setCatName(parentTable.getCatName()); + List pkFetched = client.getPrimaryKeys(pkRqst); + Assert.assertEquals(1, pkFetched.size()); + Assert.assertEquals(expectedCatalog(), pkFetched.get(0).getCatName()); + Assert.assertEquals(parentTable.getDbName(), pkFetched.get(0).getTable_db()); + Assert.assertEquals(parentTable.getTableName(), pkFetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", pkFetched.get(0).getColumn_name()); + Assert.assertEquals(1, pkFetched.get(0).getKey_seq()); + Assert.assertTrue(pkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(pkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(pkFetched.get(0).isRely_cstr()); + Assert.assertEquals(parentTable.getCatName(), pkFetched.get(0).getCatName()); + + ForeignKeysRequest fkRqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + fkRqst.setCatName(table.getCatName()); + List fkFetched = client.getForeignKeys(fkRqst); + Assert.assertEquals(1, fkFetched.size()); + Assert.assertEquals(expectedCatalog(), fkFetched.get(0).getCatName()); + Assert.assertEquals(table.getDbName(), fkFetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fkFetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fkFetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fkFetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fkFetched.get(0).getPktable_name()); + Assert.assertEquals(1, fkFetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fkFetched.get(0).getPk_name()); + Assert.assertTrue(fkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(fkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(fkFetched.get(0).isRely_cstr()); + 
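The name assertions in this test depend on the constraint names the metastore generates when none is supplied; the convention, taken directly from the assertions here, derives each name from the table name:

    // Auto-generated constraint names verified by createTableWithConstraints():
    String pkName = table.getTableName() + "_primary_key";
    String nnName = table.getTableName() + "_not_null_constraint";
    String ucName = table.getTableName() + "_unique_constraint";
    String dcName = table.getTableName() + "_default_value";
    String ccName = table.getTableName() + "_check_constraint";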
Assert.assertEquals(table.getCatName(), fkFetched.get(0).getCatName()); + + NotNullConstraintsRequest nnRqst = new NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List nnFetched = client.getNotNullConstraints(nnRqst); + Assert.assertEquals(1, nnFetched.size()); + Assert.assertEquals(table.getDbName(), nnFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), nnFetched.get(0).getTable_name()); + Assert.assertEquals("col4", nnFetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", nnFetched.get(0).getNn_name()); + Assert.assertTrue(nnFetched.get(0).isEnable_cstr()); + Assert.assertFalse(nnFetched.get(0).isValidate_cstr()); + Assert.assertFalse(nnFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), nnFetched.get(0).getCatName()); + + UniqueConstraintsRequest ucRqst = new UniqueConstraintsRequest(table.getCatName(), table + .getDbName(), table.getTableName()); + List ucFetched = client.getUniqueConstraints(ucRqst); + Assert.assertEquals(1, ucFetched.size()); + Assert.assertEquals(table.getDbName(), ucFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), ucFetched.get(0).getTable_name()); + Assert.assertEquals("col5", ucFetched.get(0).getColumn_name()); + Assert.assertEquals(1, ucFetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", ucFetched.get(0).getUk_name()); + Assert.assertTrue(ucFetched.get(0).isEnable_cstr()); + Assert.assertFalse(ucFetched.get(0).isValidate_cstr()); + Assert.assertFalse(ucFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), ucFetched.get(0).getCatName()); + + DefaultConstraintsRequest dcRqst = new DefaultConstraintsRequest(table.getCatName(), table + .getDbName(), table.getTableName()); + List dcFetched = client.getDefaultConstraints(dcRqst); + Assert.assertEquals(1, dcFetched.size()); + Assert.assertEquals(expectedCatalog(), dcFetched.get(0).getCatName()); + Assert.assertEquals(table.getDbName(), dcFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), dcFetched.get(0).getTable_name()); + Assert.assertEquals("col3", dcFetched.get(0).getColumn_name()); + Assert.assertEquals("0", dcFetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", dcFetched.get(0).getDc_name()); + Assert.assertTrue(dcFetched.get(0).isEnable_cstr()); + Assert.assertFalse(dcFetched.get(0).isValidate_cstr()); + Assert.assertFalse(dcFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), dcFetched.get(0).getCatName()); + + CheckConstraintsRequest ccRqst = new CheckConstraintsRequest(table.getCatName(), table + .getDbName(), table.getTableName()); + List ccFetched = client.getCheckConstraints(ccRqst); + Assert.assertEquals(1, ccFetched.size()); + Assert.assertEquals(expectedCatalog(), ccFetched.get(0).getCatName()); + Assert.assertEquals(table.getDbName(), ccFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), ccFetched.get(0).getTable_name()); + Assert.assertEquals("col6", ccFetched.get(0).getColumn_name()); + Assert.assertEquals("> 0", ccFetched.get(0).getCheck_expression()); + Assert.assertEquals(table.getTableName() + "_check_constraint", ccFetched.get(0).getDc_name()); + Assert.assertTrue(ccFetched.get(0).isEnable_cstr()); + Assert.assertFalse(ccFetched.get(0).isValidate_cstr()); + Assert.assertFalse(ccFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), 
ccFetched.get(0).getCatName()); + } + + @Test + public void functions() throws TException { + String dbName = "functions_other_catalog_db"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String functionName = "test_function"; + Function function = + new FunctionBuilder() + .inDb(db) + .setName(functionName) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + .setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, conf); + + Function createdFunction = client.getFunction(dbName, functionName); + // The createTime will be set on the server side, so the comparison should skip it + function.setCreateTime(createdFunction.getCreateTime()); + Assert.assertEquals("Comparing functions", function, createdFunction); + + String f2Name = "testy_function2"; + Function f2 = new FunctionBuilder() + .inDb(db) + .setName(f2Name) + .setClass(TEST_FUNCTION_CLASS) + .create(client, conf); + + Set functions = new HashSet<>(client.getFunctions(dbName, "test*")); + Assert.assertEquals(2, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertTrue(functions.contains(f2Name)); + + functions = new HashSet<>(client.getFunctions(dbName, "test_*")); + Assert.assertEquals(1, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertFalse(functions.contains(f2Name)); + + client.dropFunction(function.getDbName(), function.getFunctionName()); + try { + client.getFunction(function.getDbName(), function.getFunctionName()); + Assert.fail("Expected a NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + // Run a test without the builders. They make certain default assumptions about catalogs, etc. + // Make sure things still work without those assumptions. 
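The noBuilders test that follows exercises the flip side of the builder changes: raw Thrift objects handed to the client with no catalog set must still land in, and be read back from, the expected catalog. A compact sketch of that expectation, assuming a connected client (names illustrative):

    // Objects created without an explicit catalog are read back with one.
    Database raw = new Database("raw_db", "a description",
        MetaStoreTestUtils.getTestWarehouseDir("raw_db"), new HashMap<>());
    client.createDatabase(raw);                  // the caller never sets a catalog
    String cat = client.getDatabase("raw_db").getCatalogName();  // filled in server-side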
+ @Test + public void noBuilders() throws TException { + String dbName = "db_no_builder"; + + Database db = new Database(dbName, "bla", MetaStoreTestUtils.getTestWarehouseDir(dbName), + new HashMap<>()); + client.createDatabase(db); + + Database fetched = client.getDatabase(dbName); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + String tableName = "now_I_remember_why_I_made_those_builders"; + List cols = Arrays.asList( + new FieldSchema("col1", "int", ""), + new FieldSchema("col2", "int", "") + ); + List partKeys = Collections.singletonList(new FieldSchema("pk1", "string", "")); + SerDeInfo serdeInfo = new SerDeInfo("serde", "lib", new HashMap<>()); + StorageDescriptor sd = new StorageDescriptor(cols, null, + "org.apache.hadoop.hive.ql.io.HiveInputFormat", + "org.apache.hadoop.hive.ql.io.HiveOutputFormat", false, 0, serdeInfo, new ArrayList<>(), + new ArrayList<>(), new HashMap<>()); + Table table = new Table(tableName, dbName, "me", 0, 0, 0, sd, partKeys, new HashMap<>(), + null, null, TableType.MANAGED_TABLE.name()); + client.createTable(table); + + Table fetchedTable = client.getTable(dbName, tableName); + Assert.assertEquals(expectedCatalog(), fetchedTable.getCatName()); + + List values = Collections.singletonList("p1"); + Partition part = new Partition(values, dbName, tableName, 0, 0, sd, new HashMap<>()); + client.add_partition(part); + + Partition fetchedPart = client.getPartition(dbName, tableName, values); + Assert.assertEquals(expectedCatalog(), fetchedPart.getCatName()); + + client.dropDatabase(dbName, true, false, true); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index ca33b7da21..9490586aaf 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hive.metastore; - import com.codahale.metrics.Counter; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -43,6 +43,8 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.metrics.Metrics; @@ -72,9 +74,12 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestObjectStore { private ObjectStore objectStore = null; + private Configuration conf; private static final String DB1 = "testobjectstoredb1"; private static final String DB2 = "testobjectstoredb2"; 
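The TestObjectStore hunks below are largely mechanical: every RawStore lookup gains a leading catalog argument. The shape of the change, as a before/after sketch over two representative calls from this file:

    // Before: calls were implicitly single-catalog.
    //   List<String> tables = objectStore.getAllTables(DB1);
    //   objectStore.dropDatabase(DB1);
    // After: each call is qualified by a catalog, typically the default one.
    List<String> tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
    objectStore.dropDatabase(DEFAULT_CATALOG_NAME, DB1);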
@@ -98,37 +103,88 @@ public Long get() { @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); objectStore.setConf(conf); dropAllStoreObjects(objectStore); + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); + } + + @Test + public void catalogs() throws MetaException, NoSuchObjectException { + final String names[] = {"cat1", "cat2"}; + final String locations[] = {"loc1", "loc2"}; + final String descriptions[] = {"description 1", "description 2"}; + + for (int i = 0; i < names.length; i++) { + Catalog cat = new CatalogBuilder() + .setName(names[i]) + .setLocation(locations[i]) + .setDescription(descriptions[i]) + .build(); + objectStore.createCatalog(cat); + } + + List fetchedNames = objectStore.getCatalogs(); + Assert.assertEquals(3, fetchedNames.size()); + for (int i = 0; i < names.length - 1; i++) { + Assert.assertEquals(names[i], fetchedNames.get(i)); + Catalog cat = objectStore.getCatalog(fetchedNames.get(i)); + Assert.assertEquals(names[i], cat.getName()); + Assert.assertEquals(descriptions[i], cat.getDescription()); + Assert.assertEquals(locations[i], cat.getLocationUri()); + } + Catalog cat = objectStore.getCatalog(fetchedNames.get(2)); + Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription()); + // Location will vary by system. + + for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]); + fetchedNames = objectStore.getCatalogs(); + Assert.assertEquals(1, fetchedNames.size()); } + @Test(expected = NoSuchObjectException.class) + public void getNoSuchCatalog() throws MetaException, NoSuchObjectException { + objectStore.getCatalog("no_such_catalog"); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException { + objectStore.dropCatalog("no_such_catalog"); + } + + // TODO test dropping non-empty catalog + /** * Test database operations */ @Test public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException { + String catName = "tdo1_cat"; + createTestCatalog(catName); Database db1 = new Database(DB1, "description", "locationurl", null); Database db2 = new Database(DB2, "description", "locationurl", null); + db1.setCatalogName(catName); + db2.setCatalogName(catName); objectStore.createDatabase(db1); objectStore.createDatabase(db2); - List databases = objectStore.getAllDatabases(); + List databases = objectStore.getAllDatabases(catName); LOG.info("databases: " + databases); Assert.assertEquals(2, databases.size()); Assert.assertEquals(DB1, databases.get(0)); Assert.assertEquals(DB2, databases.get(1)); - objectStore.dropDatabase(DB1); - databases = objectStore.getAllDatabases(); + objectStore.dropDatabase(catName, DB1); + databases = objectStore.getAllDatabases(catName); Assert.assertEquals(1, databases.size()); Assert.assertEquals(DB2, databases.get(0)); - objectStore.dropDatabase(DB2); + objectStore.dropDatabase(catName, DB2); } /** @@ -137,7 +193,11 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, @Test public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - Database db1 = new Database(DB1, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(DB1) + 
.setDescription("description") + .setLocation("locationurl") + .build(conf); objectStore.createDatabase(db1); StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), @@ -149,7 +209,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); - List tables = objectStore.getAllTables(DB1); + List tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); @@ -159,20 +219,21 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO null, null, null); Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE"); - objectStore.alterTable(DB1, TABLE1, newTbl1); - tables = objectStore.getTables(DB1, "new*"); + objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1); + tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*"); Assert.assertEquals(1, tables.size()); Assert.assertEquals("new" + TABLE1, tables.get(0)); objectStore.createTable(tbl1); - tables = objectStore.getAllTables(DB1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(2, tables.size()); - List foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null); + List foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null); Assert.assertEquals(0, foreignKeys.size()); SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, "pk_const_1", false, false, false); + pk.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPrimaryKeys(ImmutableList.of(pk)); SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col", DB1, "new" + TABLE1, "fk_col", 1, @@ -180,32 +241,32 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO objectStore.addForeignKeys(ImmutableList.of(fk)); // Retrieve from PK side - foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); Assert.assertEquals(1, foreignKeys.size()); - List fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + List fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); if (fks != null) { for (SQLForeignKey fkcol : fks) { - objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), + objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name()); } } // Retrieve from FK side - foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null); Assert.assertEquals(0, foreignKeys.size()); // Retrieve from PK side - foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); Assert.assertEquals(0, foreignKeys.size()); - objectStore.dropTable(DB1, TABLE1); - tables = objectStore.getAllTables(DB1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); - objectStore.dropTable(DB1, "new" + TABLE1); - tables = objectStore.getAllTables(DB1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + 
TABLE1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(0, tables.size()); - objectStore.dropDatabase(DB1); + objectStore.dropDatabase(db1.getCatalogName(), DB1); } private StorageDescriptor createFakeSd(String location) { @@ -220,7 +281,11 @@ private StorageDescriptor createFakeSd(String location) { @Test public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - Database db1 = new Database(DB1, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(DB1) + .setDescription("description") + .setLocation("locationurl") + .build(conf); objectStore.createDatabase(db1); StorageDescriptor sd = createFakeSd("location"); HashMap tableParams = new HashMap<>(); @@ -235,31 +300,33 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); List value1 = Arrays.asList("US", "CA"); Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams); + part1.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(part1); List value2 = Arrays.asList("US", "MA"); Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams); + part2.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(part2); Deadline.startTimer("getPartition"); - List partitions = objectStore.getPartitions(DB1, TABLE1, 10); + List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); Assert.assertEquals(2, partitions.size()); Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, ""); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\""); Assert.assertEquals(2, numPartitions); - objectStore.dropPartition(DB1, TABLE1, value1); - partitions = objectStore.getPartitions(DB1, TABLE1, 10); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1); + partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); Assert.assertEquals(1, partitions.size()); Assert.assertEquals(222, partitions.get(0).getCreateTime()); - objectStore.dropPartition(DB1, TABLE1, value2); - objectStore.dropTable(DB1, TABLE1); - objectStore.dropDatabase(DB1); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1); + objectStore.dropDatabase(db1.getCatalogName(), DB1); } /** @@ -322,7 +389,7 @@ public void testDirectSqlErrorMetrics() throws Exception { Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS); - objectStore.new GetDbHelper("foo", true, true) { + objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { return null; @@ -337,7 +404,7 @@ protected Database getJdoResult(ObjectStore.GetHelper ctx) throws Meta Assert.assertEquals(0, directSqlErrors.getCount()); - objectStore.new GetDbHelper("foo", true, true) { + objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected 
Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { throw new RuntimeException(); @@ -357,39 +424,42 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, InvalidObjectException, InvalidInputException { try { Deadline.registerIfNot(100000); - List functions = store.getAllFunctions(); + List functions = store.getAllFunctions(DEFAULT_CATALOG_NAME); for (Function func : functions) { - store.dropFunction(func.getDbName(), func.getFunctionName()); + store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName()); } - List dbs = store.getAllDatabases(); - for (String db : dbs) { - List tbls = store.getAllTables(db); - for (String tbl : tbls) { - Deadline.startTimer("getPartition"); - List parts = store.getPartitions(db, tbl, 100); - for (Partition part : parts) { - store.dropPartition(db, tbl, part.getValues()); - } - // Find any constraints and drop them - Set constraints = new HashSet<>(); - List pk = store.getPrimaryKeys(db, tbl); - if (pk != null) { - for (SQLPrimaryKey pkcol : pk) { - constraints.add(pkcol.getPk_name()); + for (String catName : store.getCatalogs()) { + List dbs = store.getAllDatabases(catName); + for (String db : dbs) { + List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); + for (String tbl : tbls) { + Deadline.startTimer("getPartition"); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + for (Partition part : parts) { + store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } - } - List fks = store.getForeignKeys(null, null, db, tbl); - if (fks != null) { - for (SQLForeignKey fkcol : fks) { - constraints.add(fkcol.getFk_name()); + // Find any constraints and drop them + Set constraints = new HashSet<>(); + List pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl); + if (pk != null) { + for (SQLPrimaryKey pkcol : pk) { + constraints.add(pkcol.getPk_name()); + } } + List fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl); + if (fks != null) { + for (SQLForeignKey fkcol : fks) { + constraints.add(fkcol.getFk_name()); + } + } + for (String constraint : constraints) { + store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint); + } + store.dropTable(DEFAULT_CATALOG_NAME, db, tbl); } - for (String constraint : constraints) { - store.dropConstraint(db, tbl, constraint); - } - store.dropTable(db, tbl); + store.dropDatabase(catName, db); } - store.dropDatabase(db); + store.dropCatalog(catName); } List roles = store.listRoleNames(); for (String role : roles) { @@ -402,9 +472,9 @@ private static void dropAllStoreObjects(RawStore store) @Test public void testQueryCloseOnError() throws Exception { ObjectStore spy = Mockito.spy(objectStore); - spy.getAllDatabases(); - spy.getAllFunctions(); - spy.getAllTables(DB1); + spy.getAllDatabases(DEFAULT_CATALOG_NAME); + spy.getAllFunctions(DEFAULT_CATALOG_NAME); + spy.getAllTables(DEFAULT_CATALOG_NAME, DB1); spy.getPartitionCount(); Mockito.verify(spy, Mockito.times(3)) .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.anyObject()); @@ -566,5 +636,13 @@ public void testConcurrentAddNotifications() throws ExecutionException, Interrup previousId = event.getEventId(); } } + + private void createTestCatalog(String catName) throws MetaException { + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation("/tmp") + .build(); + objectStore.createCatalog(cat); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java index 2b8fbd1bd2..137082f863 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import org.apache.hadoop.hive.metastore.api.SchemaVersionState; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder; import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder; @@ -47,16 +48,19 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Random; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; @Category(MetastoreCheckinTest.class) public class TestObjectStoreSchemaMethods { private RawStore objectStore; + private Configuration conf; @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, DefaultPartitionExpressionProxy.class.getName()); @@ -66,8 +70,8 @@ public void setUp() throws Exception { @Test public void iSchema() throws TException { - String dbName = createUniqueDatabaseForTest(); - ISchema schema = objectStore.getISchema(new ISchemaName(dbName, "no.such.schema")); + Database db = createUniqueDatabaseForTest(); + ISchema schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), "no.such.schema")); Assert.assertNull(schema); String schemaName = "schema1"; @@ -76,7 +80,7 @@ public void iSchema() throws TException { schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .setCompatibility(SchemaCompatibility.FORWARD) .setValidationLevel(SchemaValidation.LATEST) .setCanEvolve(false) @@ -85,7 +89,7 @@ public void iSchema() throws TException { .build(); objectStore.createISchema(schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -103,9 +107,9 @@ public void iSchema() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - objectStore.alterISchema(new ISchemaName(dbName, schemaName), schema); + objectStore.alterISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -116,8 +120,8 @@ public void iSchema() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - objectStore.dropISchema(new ISchemaName(dbName, schemaName)); - schema = objectStore.getISchema(new 
ISchemaName(dbName, schemaName)); + objectStore.dropISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNull(schema); } @@ -134,16 +138,16 @@ public void schemaWithInvalidDatabase() throws MetaException, AlreadyExistsExcep @Test(expected = AlreadyExistsException.class) public void schemaAlreadyExists() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema2"; ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.HIVE) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType()); @@ -164,12 +168,12 @@ public void alterNonExistentSchema() throws MetaException, NoSuchObjectException .setName(schemaName) .setDescription("a new description") .build(); - objectStore.alterISchema(new ISchemaName(DEFAULT_DATABASE_NAME, schemaName), schema); + objectStore.alterISchema(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName), schema); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchema() throws MetaException, NoSuchObjectException { - objectStore.dropISchema(new ISchemaName(DEFAULT_DATABASE_NAME, "no_such_schema")); + objectStore.dropISchema(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no_such_schema")); } @Test(expected = NoSuchObjectException.class) @@ -177,7 +181,6 @@ public void createVersionOfNonExistentSchema() throws MetaException, AlreadyExis NoSuchObjectException, InvalidObjectException { SchemaVersion schemaVersion = new SchemaVersionBuilder() .setSchemaName("noSchemaOfThisNameExists") - .setDbName(DEFAULT_DATABASE_NAME) .setVersion(1) .addCol("a", ColumnType.STRING_TYPE_NAME) .build(); @@ -186,16 +189,16 @@ public void createVersionOfNonExistentSchema() throws MetaException, AlreadyExis @Test public void addSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema37"; int version = 1; - SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -226,10 +229,11 @@ public void addSchemaVersion() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), 
schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -249,21 +253,21 @@ public void addSchemaVersion() throws TException { Assert.assertEquals("b", cols.get(1).getName()); Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); - objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); } // Test that adding multiple versions of the same schema @Test public void multipleSchemaVersions() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema195"; ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); SchemaVersion schemaVersion = new SchemaVersionBuilder() @@ -290,7 +294,7 @@ public void multipleSchemaVersions() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(dbName, schemaName)); + schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertEquals(3, schemaVersion.getVersion()); Assert.assertEquals(3, schemaVersion.getColsSize()); List cols = schemaVersion.getCols(); @@ -302,14 +306,14 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType()); - schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(dbName, "no.such.schema.with.this.name")); + schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), "no.such.schema.with.this.name")); Assert.assertNull(schemaVersion); List versions = - objectStore.getAllSchemaVersion(new ISchemaName(dbName, "there.really.isnt.a.schema.named.this")); + objectStore.getAllSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), "there.really.isnt.a.schema.named.this")); Assert.assertNull(versions); - versions = objectStore.getAllSchemaVersion(new ISchemaName(dbName, schemaName)); + versions = objectStore.getAllSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertEquals(3, versions.size()); versions.sort(Comparator.comparingInt(SchemaVersion::getVersion)); Assert.assertEquals(1, versions.get(0).getVersion()); @@ -339,16 +343,16 @@ public void multipleSchemaVersions() throws TException { @Test(expected = AlreadyExistsException.class) public void addDuplicateSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema1234"; int version = 1; - SchemaVersion schemaVersion = 
objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -365,16 +369,16 @@ public void addDuplicateSchemaVersion() throws TException { @Test public void alterSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema371234"; int version = 1; - SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -387,10 +391,11 @@ public void alterSchemaVersion() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -402,12 +407,13 @@ public void alterSchemaVersion() throws TException { serde.setSerializerClass(serializer); serde.setDeserializerClass(deserializer); schemaVersion.setSerDe(serde); - objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version), schemaVersion); + objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version), schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName()); @@ -428,22 +434,22 @@ public void alterNonExistentSchemaVersion() throws MetaException, AlreadyExistsE .addCol("b", 
ColumnType.FLOAT_TYPE_NAME) .setState(SchemaVersionState.INITIATED) .build(); - objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_DATABASE_NAME, schemaName), version), schemaVersion); + objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName), version), schemaVersion); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchemaVersion() throws NoSuchObjectException, MetaException { - objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_DATABASE_NAME, "ther is no schema named this"), 23)); + objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "ther is no schema named this"), 23)); } @Test public void schemaQuery() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName1 = "a_schema1"; ISchema schema1 = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName1) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema1); @@ -451,7 +457,7 @@ public void schemaQuery() throws TException { ISchema schema2 = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName2) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema2); @@ -497,7 +503,8 @@ public void schemaQuery() throws TException { results = objectStore.getSchemaVersionsByColumns("gamma", null, null); Assert.assertEquals(1, results.size()); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(2, results.get(0).getVersion()); // fetch 2 in same schema @@ -505,10 +512,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName1, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // fetch across schemas @@ -516,16 +525,20 @@ public void schemaQuery() throws TException { Assert.assertEquals(4, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName1, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), 
results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); Assert.assertEquals(schemaName2, results.get(2).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(2).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(2).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(2).getSchema().getCatName()); Assert.assertEquals(1, results.get(2).getVersion()); Assert.assertEquals(schemaName2, results.get(3).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(3).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(3).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(3).getSchema().getCatName()); Assert.assertEquals(2, results.get(3).getVersion()); // fetch by namespace @@ -533,10 +546,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(2, results.get(0).getVersion()); Assert.assertEquals(schemaName2, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // fetch by name and type @@ -544,10 +559,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName2, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName2, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // Make sure matching name but wrong type doesn't return @@ -560,14 +577,26 @@ public void schemaVersionQueryNoNameOrNamespace() throws MetaException { } private static int dbNum = 1; - private String createUniqueDatabaseForTest() throws MetaException, InvalidObjectException { + private static Random rand = new Random(); + private Database createUniqueDatabaseForTest() throws MetaException, InvalidObjectException { + String catName; + if (rand.nextDouble() < 0.5) { + catName = "unique_cat_for_test_" + dbNum++; + objectStore.createCatalog(new CatalogBuilder() + .setName(catName) + .setLocation("there") + .build()); + } else { + catName = DEFAULT_CATALOG_NAME; + } String dbName = "uniquedbfortest" + dbNum++; Database db = new DatabaseBuilder() .setName(dbName) + .setCatalogName(catName) .setLocation("somewhere") .setDescription("descriptive") - .build(); + 
.build(conf); objectStore.createDatabase(db); - return dbName; + return db; } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index f286da824d..49033d3943 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -42,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.junit.After; @@ -52,9 +54,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestOldSchema { private ObjectStore store = null; + private Configuration conf; private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName()); @@ -91,13 +96,14 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false); MetaStoreTestUtils.setConfForStandloneMode(conf); store = new ObjectStore(); store.setConf(conf); dropAllStoreObjects(store); + HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf)); HyperLogLog hll = HyperLogLog.builder().build(); hll.addLong(1); @@ -121,7 +127,11 @@ public void tearDown() { public void testPartitionOps() throws Exception { String dbName = "default"; String tableName = "snp"; - Database db1 = new Database(dbName, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(dbName) + .setDescription("description") + .setLocation("locationurl") + .build(conf); store.createDatabase(db1); long now = System.currentTimeMillis(); List cols = new ArrayList<>(); @@ -143,6 +153,7 @@ public void testPartitionOps() throws Exception { psd.setLocation("file:/tmp/default/hit/ds=" + partVal); Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, Collections.emptyMap()); + part.setCatName(DEFAULT_CATALOG_NAME); store.addPartition(part); ColumnStatistics cs = new ColumnStatistics(); ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); @@ -185,7 +196,7 @@ public void checkStats(AggrStats aggrStats) throws Exception { for (int i = 0; i < 10; i++) { partNames.add("ds=" + i); } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, + AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, Arrays.asList("col1")); 
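For reference, the change pattern these RawStore tests exercise reduces to one mechanical rule; a minimal sketch (the wrapper method here is hypothetical, but the signatures are the ones this patch introduces):

import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

// Every database- or table-scoped RawStore call now takes a leading catalog
// name; passing DEFAULT_CATALOG_NAME reproduces the pre-catalog behavior.
private List<Partition> fetchParts(RawStore store, String db, String tbl)
    throws MetaException, NoSuchObjectException {
  // before HIVE-18755: store.getPartitions(db, tbl, 100)
  return store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
}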
statChecker.checkStats(aggrStats); @@ -200,18 +211,18 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, try { Deadline.registerIfNot(100000); Deadline.startTimer("getPartition"); - List dbs = store.getAllDatabases(); + List dbs = store.getAllDatabases(DEFAULT_CATALOG_NAME); for (int i = 0; i < dbs.size(); i++) { String db = dbs.get(i); - List tbls = store.getAllTables(db); + List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { - List parts = store.getPartitions(db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); for (Partition part : parts) { - store.dropPartition(db, tbl, part.getValues()); + store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } - store.dropTable(db, tbl); + store.dropTable(DEFAULT_CATALOG_NAME, db, tbl); } - store.dropDatabase(db); + store.dropDatabase(DEFAULT_CATALOG_NAME, db); } } catch (NoSuchObjectException e) { } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java index 6b7d91318a..3d1723edb8 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; @@ -57,9 +58,9 @@ public void setUp() throws Exception { @Test public void testIpAddress() throws Exception { - Database db = new Database(); - db.setName("testIpAddressIp"); - msc.createDatabase(db); + Database db = new DatabaseBuilder() + .setName("testIpAddressIp") + .create(msc, conf); msc.dropDatabase(db.getName()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index 11f84f2b60..930e996a29 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -63,19 +64,17 @@ public void testRetryingHMSHandler() throws Exception { String dbName = "hive4159"; String tblName = "tmptbl"; - Database db = new Database(); - db.setName(dbName); - msc.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(msc, conf); Assert.assertEquals(2, AlternateFailurePreListener.getCallCount()); - Table tbl = new TableBuilder() + new TableBuilder() 
.setDbName(dbName) .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) - .build(); - - msc.createTable(tbl); + .create(msc, conf); Assert.assertEquals(4, AlternateFailurePreListener.getCallCount()); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java new file mode 100644 index 0000000000..6cca062268 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java @@ -0,0 +1,728 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Date; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@Category(MetastoreCheckinTest.class) +public class TestStats { + private static final Logger LOG = LoggerFactory.getLogger(TestStats.class); + + private static final String NO_CAT = "DO_NOT_USE_A_CATALOG!"; + + private IMetaStoreClient client; + private Configuration conf; + + @Before + public void setUp() throws MetaException { + conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false); + // Get new client + client = new HiveMetaStoreClient(conf); + } + + @After + public void tearDown() throws TException { + // Drop any left over catalogs + List catalogs = client.getCatalogs(); + for (String catName : catalogs) { + if (!catName.equalsIgnoreCase(DEFAULT_CATALOG_NAME)) { + // First drop any databases in catalog + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, 
false, true); + } + client.dropCatalog(catName); + } else { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + if (!db.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) { + client.dropDatabase(catName, db, true, false, true); + } + } + } + } + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + private Map buildAllColumns() { + Map colMap = new HashMap<>(6); + Column[] cols = { new BinaryColumn(), new BooleanColumn(), new DateColumn(), + new DoubleColumn(), new LongColumn(), new StringColumn() }; + for (Column c : cols) colMap.put(c.colName, c); + return colMap; + } + + private List createMetadata(String catName, String dbName, String tableName, + String partKey, List partVals, + Map colMap) + throws TException { + if (!DEFAULT_CATALOG_NAME.equals(catName) && !NO_CAT.equals(catName)) { + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + } + + Database db; + if (!DEFAULT_DATABASE_NAME.equals(dbName)) { + DatabaseBuilder dbBuilder = new DatabaseBuilder() + .setName(dbName); + if (!NO_CAT.equals(catName)) dbBuilder.setCatalogName(catName); + db = dbBuilder.create(client, conf); + } else { + db = client.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME); + } + + TableBuilder tb = new TableBuilder() + .inDb(db) + .setTableName(tableName); + + for (Column col : colMap.values()) tb.addCol(col.colName, col.colType); + + if (partKey != null) { + assert partVals != null && !partVals.isEmpty() : + "Must provide partition values for partitioned table"; + tb.addPartCol(partKey, ColumnType.STRING_TYPE_NAME); + } + Table table = tb.create(client, conf); + + if (partKey != null) { + for (String partVal : partVals) { + new PartitionBuilder() + .inTable(table) + .addValue(partVal) + .addToTable(client, conf); + } + } + + SetPartitionsStatsRequest rqst = new SetPartitionsStatsRequest(); + List partNames = new ArrayList<>(); + if (partKey == null) { + rqst.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, null, + colMap.values())); + } else { + for (String partVal : partVals) { + String partName = partKey + "=" + partVal; + rqst.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, partName, + colMap.values())); + partNames.add(partName); + } + } + client.setPartitionColumnStatistics(rqst); + return partNames; + } + + private ColumnStatistics buildStatsForOneTableOrPartition(String catName, String dbName, + String tableName, String partName, + Collection cols) { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(partName == null, dbName, tableName); + if (!NO_CAT.equals(catName)) desc.setCatName(catName); + if (partName != null) desc.setPartName(partName); + + List objs = new ArrayList<>(cols.size()); + + for (Column col : cols) objs.add(col.generate()); + + return new ColumnStatistics(desc, objs); + } + + private void dropStats(String catName, String dbName, String tableName, String partName, + Collection colNames) + throws TException { + for (String colName : colNames) { + if (partName == null) { + if (NO_CAT.equals(catName)) client.deleteTableColumnStatistics(dbName, tableName, colName); + else client.deleteTableColumnStatistics(catName, dbName, tableName, colName); + } else { + if (NO_CAT.equals(catName)) client.deletePartitionColumnStatistics(dbName, tableName, partName, colName); + else client.deletePartitionColumnStatistics(catName, dbName, tableName, 
partName, colName); + } + } + } + + private void compareStatsForTable(String catName, String dbName, String tableName, + Map colMap) throws TException { + List objs = catName.equals(NO_CAT) ? + client.getTableColumnStatistics(dbName, tableName, new ArrayList<>(colMap.keySet())) : + client.getTableColumnStatistics(catName, dbName, tableName, new ArrayList<>(colMap.keySet())); + compareStatsForOneTableOrPartition(objs, 0, colMap); + } + + private void compareStatsForPartitions(String catName, String dbName, String tableName, + List partNames, final Map colMap) + throws TException { + Map> partObjs = catName.equals(NO_CAT) ? + client.getPartitionColumnStatistics(dbName, tableName, partNames, new ArrayList<>(colMap.keySet())) : + client.getPartitionColumnStatistics(catName, dbName, tableName, partNames, new ArrayList<>(colMap.keySet())); + for (int i = 0; i < partNames.size(); i++) { + compareStatsForOneTableOrPartition(partObjs.get(partNames.get(i)), i, colMap); + } + AggrStats aggr = catName.equals(NO_CAT) ? + client.getAggrColStatsFor(dbName, tableName, new ArrayList<>(colMap.keySet()), partNames) : + client.getAggrColStatsFor(catName, dbName, tableName, new ArrayList<>(colMap.keySet()), partNames); + Assert.assertEquals(partNames.size(), aggr.getPartsFound()); + Assert.assertEquals(colMap.size(), aggr.getColStatsSize()); + aggr.getColStats().forEach(cso -> colMap.get(cso.getColName()).compareAggr(cso)); + } + + private void compareStatsForOneTableOrPartition(List objs, + final int partOffset, + final Map colMap) + throws TException { + Assert.assertEquals(objs.size(), colMap.size()); + objs.forEach(cso -> colMap.get(cso.getColName()).compare(cso, partOffset)); + } + + @Test + public void tableInHiveCatalog() throws TException { + String dbName = "db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(DEFAULT_CATALOG_NAME, dbName, tableName, null, null, colMap); + compareStatsForTable(DEFAULT_CATALOG_NAME, dbName, tableName, colMap); + dropStats(DEFAULT_CATALOG_NAME, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableInHiveCatalog() throws TException { + String dbName = "db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(DEFAULT_CATALOG_NAME, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, colMap); + for (String partName : partNames) { + dropStats(DEFAULT_CATALOG_NAME, dbName, tableName, partName, colMap.keySet()); + } + } + + @Test + public void tableOtherCatalog() throws TException { + String catName = "cat_table_stats"; + String dbName = "other_cat_db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(catName, dbName, tableName, null, null, colMap); + compareStatsForTable(catName, dbName, tableName, colMap); + dropStats(catName, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableOtherCatalog() throws TException { + String catName = "cat_table_stats"; + String dbName = "other_cat_db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(catName, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(catName, dbName, tableName, partNames, colMap); + for (String partName : 
partNames) { + dropStats(catName, dbName, tableName, partName, colMap.keySet()); + } + } + + @Test + public void tableDeprecatedCalls() throws TException { + String dbName = "old_db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(NO_CAT, dbName, tableName, null, null, colMap); + compareStatsForTable(NO_CAT, dbName, tableName, colMap); + dropStats(NO_CAT, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableDeprecatedCalls() throws TException { + String dbName = "old_db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(NO_CAT, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(NO_CAT, dbName, tableName, partNames, colMap); + for (String partName : partNames) { + dropStats(NO_CAT, dbName, tableName, partName, colMap.keySet()); + } + } + + private abstract class Column { + final String colName; + final String colType; + + Random rand = new Random(); + + List maxLens, numNulls, numDvs; + List avgLens; + + + public Column(String colName, String colType) { + this.colName = colName; + this.colType = colType; + maxLens = new ArrayList<>(); + numNulls = new ArrayList<>(); + avgLens = new ArrayList<>(); + numDvs = new ArrayList<>(); + } + + abstract ColumnStatisticsObj generate(); + abstract void compare(ColumnStatisticsObj obj, int offset); + abstract void compareAggr(ColumnStatisticsObj obj); + + void compareCommon(ColumnStatisticsObj obj) { + Assert.assertEquals(colName, obj.getColName()); + Assert.assertEquals(colType, obj.getColType()); + } + + long genMaxLen() { + return genPositiveLong(maxLens); + } + + long getMaxLen() { + return maxLong(maxLens); + } + + long genNumNulls() { + return genPositiveLong(numNulls); + } + + long genNumDvs() { + return genPositiveLong(numDvs); + } + + long getNumNulls() { + return sumLong(numNulls); + } + + long getNumDvs() { + return maxLong(numDvs); + } + + double genAvgLens() { + return genDouble(avgLens); + } + + double getAvgLen() { + return maxDouble(avgLens); + } + + protected long genNegativeLong(List addTo) { + long val = rand.nextInt(100); + if (val > 0) val *= -1; + addTo.add(val); + return val; + } + + protected long genPositiveLong(List addTo) { + long val = rand.nextInt(100); + val = Math.abs(val) + 1; // make sure it isn't 0 + addTo.add(val); + return val; + } + + protected long maxLong(List maxOf) { + long max = Long.MIN_VALUE; + for (long maybe : maxOf) max = Math.max(max, maybe); + return max; + } + + protected long sumLong(List sumOf) { + long sum = 0; + for (long element : sumOf) sum += element; + return sum; + } + + protected double genDouble(List addTo) { + double val = rand.nextDouble() * rand.nextInt(100); + addTo.add(val); + return val; + } + + protected double maxDouble(List maxOf) { + double max = Double.MIN_VALUE; + for (double maybe : maxOf) max = Math.max(max, maybe); + return max; + } + + } + + private class BinaryColumn extends Column { + public BinaryColumn() { + super("bincol", ColumnType.BINARY_TYPE_NAME); + } + + @Override + ColumnStatisticsObj generate() { + BinaryColumnStatsData binData = new BinaryColumnStatsData(genMaxLen(), genAvgLens(), genNumNulls()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setBinaryStats(binData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + 
compareCommon(obj); + Assert.assertEquals("binary max length", maxLens.get(offset), + (Long)obj.getStatsData().getBinaryStats().getMaxColLen()); + Assert.assertEquals("binary min length", avgLens.get(offset), obj.getStatsData().getBinaryStats().getAvgColLen(), 0.01); + Assert.assertEquals("binary num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBinaryStats().getNumNulls()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr binary max length", getMaxLen(), obj.getStatsData().getBinaryStats().getMaxColLen()); + Assert.assertEquals("aggr binary min length", getAvgLen(), obj.getStatsData().getBinaryStats().getAvgColLen(), 0.01); + Assert.assertEquals("aggr binary num nulls", getNumNulls(), obj.getStatsData().getBinaryStats().getNumNulls()); + } + } + + private class BooleanColumn extends Column { + private List numTrues, numFalses; + + public BooleanColumn() { + super("boolcol", ColumnType.BOOLEAN_TYPE_NAME); + numTrues = new ArrayList<>(); + numFalses = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + BooleanColumnStatsData + boolData = new BooleanColumnStatsData(genNumTrues(), genNumFalses(), genNumNulls()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setBooleanStats(boolData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("boolean num trues", numTrues.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumTrues()); + Assert.assertEquals("boolean num falses", numFalses.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumFalses()); + Assert.assertEquals("boolean num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumNulls()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr boolean num trues", getNumTrues(), obj.getStatsData().getBooleanStats().getNumTrues()); + Assert.assertEquals("aggr boolean num falses", getNumFalses(), obj.getStatsData().getBooleanStats().getNumFalses()); + Assert.assertEquals("aggr boolean num nulls", getNumNulls(), obj.getStatsData().getBooleanStats().getNumNulls()); + } + + private long genNumTrues() { + return genPositiveLong(numTrues); + } + + private long genNumFalses() { + return genPositiveLong(numFalses); + } + + private long getNumTrues() { + return sumLong(numTrues); + } + + private long getNumFalses() { + return sumLong(numFalses); + } + } + + private class DateColumn extends Column { + private List lowVals, highVals; + + public DateColumn() { + super("datecol", ColumnType.DATE_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + DateColumnStatsData dateData = new DateColumnStatsData(genNumNulls(), genNumDvs()); + dateData.setLowValue(genLowValue()); + dateData.setHighValue(genHighValue()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setDateStats(dateData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("date num nulls", numNulls.get(offset), (Long)obj.getStatsData().getDateStats().getNumNulls()); + Assert.assertEquals("date num dvs", numDvs.get(offset), (Long)obj.getStatsData().getDateStats().getNumDVs()); + Assert.assertEquals("date low val", lowVals.get(offset), 
obj.getStatsData().getDateStats().getLowValue()); + Assert.assertEquals("date high val", highVals.get(offset), obj.getStatsData().getDateStats().getHighValue()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr date num nulls", getNumNulls(), obj.getStatsData().getDateStats().getNumNulls()); + Assert.assertEquals("aggr date num dvs", getNumDvs(), obj.getStatsData().getDateStats().getNumDVs()); + Assert.assertEquals("aggr date low val", getLowVal(), obj.getStatsData().getDateStats().getLowValue()); + Assert.assertEquals("aggr date high val", getHighVal(), obj.getStatsData().getDateStats().getHighValue()); + } + + private Date genLowValue() { + Date d = new Date(rand.nextInt(100) * -1); + lowVals.add(d); + return d; + } + + private Date genHighValue() { + Date d = new Date(rand.nextInt(200)); + highVals.add(d); + return d; + } + + private Date getLowVal() { + long min = Long.MAX_VALUE; + for (Date d : lowVals) min = Math.min(min, d.getDaysSinceEpoch()); + return new Date(min); + } + + private Date getHighVal() { + long max = Long.MIN_VALUE; + for (Date d : highVals) max = Math.max(max, d.getDaysSinceEpoch()); + return new Date(max); + } + } + + private class DoubleColumn extends Column { + List lowVals, highVals; + + public DoubleColumn() { + super("doublecol", ColumnType.DOUBLE_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(genNumNulls(), genNumDvs()); + doubleData.setLowValue(genLowVal()); + doubleData.setHighValue(genHighVal()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setDoubleStats(doubleData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("double num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getDoubleStats().getNumNulls()); + Assert.assertEquals("double num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getDoubleStats().getNumDVs()); + Assert.assertEquals("double low val", lowVals.get(offset), + obj.getStatsData().getDoubleStats().getLowValue(), 0.01); + Assert.assertEquals("double high val", highVals.get(offset), + obj.getStatsData().getDoubleStats().getHighValue(), 0.01); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr double num nulls", getNumNulls(), + obj.getStatsData().getDoubleStats().getNumNulls()); + Assert.assertEquals("aggr double num dvs", getNumDvs(), + obj.getStatsData().getDoubleStats().getNumDVs()); + Assert.assertEquals("aggr double low val", getLowVal(), + obj.getStatsData().getDoubleStats().getLowValue(), 0.01); + Assert.assertEquals("aggr double high val", getHighVal(), + obj.getStatsData().getDoubleStats().getHighValue(), 0.01); + + } + + private double genLowVal() { + return genDouble(lowVals); + } + + private double genHighVal() { + return genDouble(highVals); + } + + private double getLowVal() { + double min = Double.MAX_VALUE; + for (Double d : lowVals) min = Math.min(min, d); + return min; + } + + private double getHighVal() { + return maxDouble(highVals); + } + } + + private class LongColumn extends Column { + List lowVals, highVals; + + public LongColumn() { + super("bigintcol", ColumnType.BIGINT_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj 
generate() { + LongColumnStatsData longData = new LongColumnStatsData(genNumNulls(), genNumDvs()); + longData.setLowValue(genLowVal()); + longData.setHighValue(genHighVal()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setLongStats(longData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("long num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getLongStats().getNumNulls()); + Assert.assertEquals("long num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getLongStats().getNumDVs()); + Assert.assertEquals("long low val", (long)lowVals.get(offset), + obj.getStatsData().getLongStats().getLowValue()); + Assert.assertEquals("long high val", (long)highVals.get(offset), + obj.getStatsData().getLongStats().getHighValue()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr long num nulls", getNumNulls(), + obj.getStatsData().getLongStats().getNumNulls()); + Assert.assertEquals("aggr long num dvs", getNumDvs(), + obj.getStatsData().getLongStats().getNumDVs()); + Assert.assertEquals("aggr long low val", getLowVal(), + obj.getStatsData().getLongStats().getLowValue()); + Assert.assertEquals("aggr long high val", getHighVal(), + obj.getStatsData().getLongStats().getHighValue()); + } + + private long genLowVal() { + return genNegativeLong(lowVals); + } + + private long genHighVal() { + return genPositiveLong(highVals); + } + + private long getLowVal() { + long min = Long.MAX_VALUE; + for (Long val : lowVals) min = Math.min(min, val); + return min; + } + + private long getHighVal() { + return maxLong(highVals); + } + } + + private class StringColumn extends Column { + public StringColumn() { + super("strcol", ColumnType.STRING_TYPE_NAME); + } + + @Override + ColumnStatisticsObj generate() { + StringColumnStatsData strData = new StringColumnStatsData(genMaxLen(), genAvgLens(), + genNumNulls(), genNumDvs()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setStringStats(strData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("str num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getStringStats().getNumNulls()); + Assert.assertEquals("str num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getStringStats().getNumDVs()); + Assert.assertEquals("str low val", (long)maxLens.get(offset), + obj.getStatsData().getStringStats().getMaxColLen()); + Assert.assertEquals("str high val", avgLens.get(offset), + obj.getStatsData().getStringStats().getAvgColLen(), 0.01); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr str num nulls", getNumNulls(), + obj.getStatsData().getStringStats().getNumNulls()); + Assert.assertEquals("aggr str num dvs", getNumDvs(), + obj.getStatsData().getStringStats().getNumDVs()); + Assert.assertEquals("aggr str low val", getMaxLen(), + obj.getStatsData().getStringStats().getMaxColLen()); + Assert.assertEquals("aggr str high val", getAvgLen(), + obj.getStatsData().getStringStats().getAvgColLen(), 0.01); + + } + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index 150b6ca919..c9a6a471cb 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.repeat; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import java.lang.reflect.AccessibleObject; import java.lang.reflect.Array; @@ -50,35 +51,36 @@ public VerifyingObjectStore() { } @Override - public List getPartitionsByFilter(String dbName, String tblName, String filter, - short maxParts) throws MetaException, NoSuchObjectException { + public List getPartitionsByFilter(String catName, String dbName, String tblName, + String filter, short maxParts) + throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByFilterInternal( - dbName, tblName, filter, maxParts, true, false); + catName, dbName, tblName, filter, maxParts, true, false); List ormResults = getPartitionsByFilterInternal( - dbName, tblName, filter, maxParts, false, true); + catName, dbName, tblName, filter, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByNamesInternal( - dbName, tblName, partNames, true, false); + catName, dbName, tblName, partNames, true, false); List ormResults = getPartitionsByNamesInternal( - dbName, tblName, partNames, false, true); + catName, dbName, tblName, partNames, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { List ormParts = new LinkedList<>(); boolean sqlResult = getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); boolean ormResult = getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, ormParts, false, true); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, ormParts, false, true); if (sqlResult != ormResult) { String msg = "The unknown flag is different - SQL " + sqlResult + ", ORM " + ormResult; LOG.error(msg); @@ -90,32 +92,32 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, @Override public List getPartitions( - String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { - List sqlResults = getPartitionsInternal(dbName, tableName, maxParts, true, false); - List ormResults = getPartitionsInternal(dbName, tableName, maxParts, false, true); + String catName, String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { + List sqlResults = getPartitionsInternal(catName, dbName, tableName, maxParts, true, false); + List ormResults = getPartitionsInternal(catName, dbName, tableName, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, + public 
ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { ColumnStatistics sqlResult = getTableColumnStatisticsInternal( - dbName, tableName, colNames, true, false); + catName, dbName, tableName, colNames, true, false); ColumnStatistics jdoResult = getTableColumnStatisticsInternal( - dbName, tableName, colNames, false, true); + catName, dbName, tableName, colNames, false, true); verifyObjects(sqlResult, jdoResult, ColumnStatistics.class); return sqlResult; } @Override - public List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tableName, List partNames, List colNames) throws MetaException, NoSuchObjectException { List sqlResult = getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, true, false); + catName, dbName, tableName, partNames, colNames, true, false); List jdoResult = getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, false, true); + catName, dbName, tableName, partNames, colNames, false, true); verifyLists(sqlResult, jdoResult, ColumnStatistics.class); return sqlResult; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index a72fc0ba26..d451f966b0 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -28,9 +28,11 @@ import java.util.concurrent.ThreadFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; +import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; @@ -46,6 +48,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -54,16 +57,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreCheckinTest.class) public class TestCachedStore { private ObjectStore objectStore; private CachedStore cachedStore; private SharedCache sharedCache; + private Configuration conf; @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); @@ -76,6 +82,9 @@ public void setUp() throws Exception { 
sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); + + // Create the 'hive' catalog + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); } /********************************************************************************************** @@ -89,67 +98,67 @@ public void testDatabaseOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database via CachedStore - Database dbRead = cachedStore.getDatabase(dbName); + Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Add another db via CachedStore final String dbName1 = "testDatabaseOps1"; Database db1 = createTestDb(dbName1, dbOwner); cachedStore.createDatabase(db1); - db1 = cachedStore.getDatabase(dbName1); + db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); // Read db via ObjectStore - dbRead = objectStore.getDatabase(dbName1); + dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); Assert.assertEquals(db1, dbRead); // Alter the db via CachedStore (can only alter owner or parameters) dbOwner = "user2"; db = new Database(db); db.setOwnerName(dbOwner); - cachedStore.alterDatabase(dbName, db); - db = cachedStore.getDatabase(dbName); + cachedStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); + db = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Read db via ObjectStore - dbRead = objectStore.getDatabase(dbName); + dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Add another db via ObjectStore final String dbName2 = "testDatabaseOps2"; Database db2 = createTestDb(dbName2, dbOwner); objectStore.createDatabase(db2); - db2 = objectStore.getDatabase(dbName2); + db2 = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2); // Alter db "testDatabaseOps" via ObjectStore dbOwner = "user1"; db = new Database(db); db.setOwnerName(dbOwner); - objectStore.alterDatabase(dbName, db); - db = objectStore.getDatabase(dbName); + objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Drop db "testDatabaseOps1" via ObjectStore - objectStore.dropDatabase(dbName1); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read the newly added db via CachedStore - dbRead = cachedStore.getDatabase(dbName2); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2); Assert.assertEquals(db2, dbRead); // Read the altered db via CachedStore (altered user from "user2" to "user1") - dbRead = cachedStore.getDatabase(dbName); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Try to read the dropped db after cache update try { - dbRead = cachedStore.getDatabase(dbName1); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); Assert.fail("The database: " + dbName1 + " should have been removed from the cache after running the update service"); } catch (NoSuchObjectException e) { @@ -157,8 +166,8 @@ public void testDatabaseOps() throws Exception { } // Clean up - objectStore.dropDatabase(dbName); - 
objectStore.dropDatabase(dbName2); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName2); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -171,7 +180,7 @@ public void testTableOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore String tblName = "tbl"; @@ -184,16 +193,16 @@ public void testTableOps() throws Exception { List ptnCols = new ArrayList(); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database, table via CachedStore - Database dbRead= cachedStore.getDatabase(dbName); + Database dbRead= cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - Table tblRead = cachedStore.getTable(dbName, tblName); + Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); // Add a new table via CachedStore @@ -201,10 +210,10 @@ public void testTableOps() throws Exception { Table tbl1 = new Table(tbl); tbl1.setTableName(tblName1); cachedStore.createTable(tbl1); - tbl1 = cachedStore.getTable(dbName, tblName1); + tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); // Read via object store - tblRead = objectStore.getTable(dbName, tblName1); + tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); Assert.assertEquals(tbl1, tblRead); // Add a new table via ObjectStore @@ -212,43 +221,43 @@ public void testTableOps() throws Exception { Table tbl2 = new Table(tbl); tbl2.setTableName(tblName2); objectStore.createTable(tbl2); - tbl2 = objectStore.getTable(dbName, tblName2); + tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); // Alter table "tbl" via ObjectStore tblOwner = "user2"; tbl.setOwner(tblOwner); - objectStore.alterTable(dbName, tblName, tbl); - tbl = objectStore.getTable(dbName, tblName); + objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Drop table "tbl1" via ObjectStore - objectStore.dropTable(dbName, tblName1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName1); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read "tbl2" via CachedStore - tblRead = cachedStore.getTable(dbName, tblName2); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); Assert.assertEquals(tbl2, tblRead); // Read the altered "tbl" via CachedStore - tblRead = cachedStore.getTable(dbName, tblName); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); // Try to read the dropped "tbl1" via CachedStore (should throw exception) - tblRead = cachedStore.getTable(dbName, tblName1); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); Assert.assertNull(tblRead); // Should return "tbl" and "tbl2" - List tblNames = cachedStore.getTables(dbName, "*"); + List tblNames = cachedStore.getTables(DEFAULT_CATALOG_NAME, dbName, "*"); 
Assert.assertTrue(tblNames.contains(tblName)); Assert.assertTrue(!tblNames.contains(tblName1)); Assert.assertTrue(tblNames.contains(tblName2)); // Clean up - objectStore.dropTable(dbName, tblName); - objectStore.dropTable(dbName, tblName2); - objectStore.dropDatabase(dbName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName2); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -261,7 +270,7 @@ public void testPartitionOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore String tblName = "tbl"; @@ -276,76 +285,81 @@ public void testPartitionOps() throws Exception { ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); final String ptnColVal1 = "aaa"; Map partParams = new HashMap(); Partition ptn1 = new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn1.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn1); - ptn1 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1.setCatName(DEFAULT_CATALOG_NAME); final String ptnColVal2 = "bbb"; Partition ptn2 = new Partition(Arrays.asList(ptnColVal2), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn2.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn2); - ptn2 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + ptn2 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database, table, partition via CachedStore - Database dbRead = cachedStore.getDatabase(dbName); + Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - Table tblRead = cachedStore.getTable(dbName, tblName); + Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); - Partition ptn1Read = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1)); + Partition ptn1Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); Assert.assertEquals(ptn1, ptn1Read); - Partition ptn2Read = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + Partition ptn2Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); Assert.assertEquals(ptn2, ptn2Read); // Add a new partition via ObjectStore final String ptnColVal3 = "ccc"; Partition ptn3 = new Partition(Arrays.asList(ptnColVal3), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn3.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn3); - ptn3 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal3)); + ptn3 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); // Alter an existing partition ("aaa") via ObjectStore final String 
ptnColVal1Alt = "aaaAlt"; Partition ptn1Atl = new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams); - objectStore.alterPartition(dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl); - ptn1Atl = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); + ptn1Atl.setCatName(DEFAULT_CATALOG_NAME); + objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl); + ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); // Drop an existing partition ("bbb") via ObjectStore - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read the newly added partition via CachedStore - Partition ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal3)); + Partition ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); Assert.assertEquals(ptn3, ptnRead); // Read the altered partition via CachedStore - ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); + ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); Assert.assertEquals(ptn1Atl, ptnRead); // Try to read the dropped partition via CachedStore try { - ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); Assert.fail("The partition: " + ptnColVal2 + " should have been removed from the cache after running the update service"); } catch (NoSuchObjectException e) { // Expected } // Clean up - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal3)); - objectStore.dropTable(dbName, tblName); - objectStore.dropDatabase(dbName); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -358,7 +372,7 @@ public void testTableColStatsOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore final String tblName = "tbl"; @@ -389,7 +403,7 @@ public void testTableColStatsOps() throws Exception { ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Add ColumnStatistics for tbl to metastore DB via ObjectStore ColumnStatistics stats = new ColumnStatistics(); @@ -440,13 +454,13 @@ public void testTableColStatsOps() throws Exception { // Read table stats via CachedStore ColumnStatistics newStats = - cachedStore.getTableColumnStatistics(dbName, tblName, + 
cachedStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(col1.getName(), col2.getName(), col3.getName())); Assert.assertEquals(stats, newStats); // Clean up - objectStore.dropTable(dbName, tblName); - objectStore.dropDatabase(dbName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -466,11 +480,11 @@ public void testSharedStoreDb() { sharedCache.addDatabaseToCache(db2); sharedCache.addDatabaseToCache(db3); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.alterDatabaseInCache("db1", newDb1); + sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.removeDatabaseFromCache("db2"); + sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2"); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2); - List dbs = sharedCache.listCachedDatabases(); + List dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(dbs.size(), 2); Assert.assertTrue(dbs.contains("newdb1")); Assert.assertTrue(dbs.contains("db3")); @@ -528,26 +542,26 @@ public void testSharedStoreTable() { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache("db1", "tbl1", tbl1); - sharedCache.addTableToCache("db1", "tbl2", tbl2); - sharedCache.addTableToCache("db1", "tbl3", tbl3); - sharedCache.addTableToCache("db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache("db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); Assert.assertEquals(t.getSd().getLocation(), "loc1"); - sharedCache.removeTableFromCache("db1", "tbl1"); + sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); Assert.assertEquals(sharedCache.getCachedTableCount(), 3); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - sharedCache.alterTableInCache("db2", "tbl1", newTbl1); + sharedCache.alterTableInCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", newTbl1); Assert.assertEquals(sharedCache.getCachedTableCount(), 3); Assert.assertEquals(sharedCache.getSdCache().size(), 3); - sharedCache.removeTableFromCache("db1", "tbl2"); + sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl2"); Assert.assertEquals(sharedCache.getCachedTableCount(), 2); Assert.assertEquals(sharedCache.getSdCache().size(), 2); } @@ -568,9 +582,9 @@ public void testSharedStorePartition() { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); Partition part1 = new Partition(); StorageDescriptor sd1 = new 
StorageDescriptor(); @@ -622,20 +636,20 @@ public void testSharedStorePartition() { newPart1.setSd(newSd1); newPart1.setValues(Arrays.asList("201701")); - sharedCache.addPartitionToCache(dbName, tbl1Name, part1); - sharedCache.addPartitionToCache(dbName, tbl1Name, part2); - sharedCache.addPartitionToCache(dbName, tbl1Name, part3); - sharedCache.addPartitionToCache(dbName, tbl2Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1); - Partition t = sharedCache.getPartitionFromCache(dbName, tbl1Name, Arrays.asList("201701")); + Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1"); - sharedCache.removePartitionFromCache(dbName, tbl2Name, Arrays.asList("201701")); - t = sharedCache.getPartitionFromCache(dbName, tbl2Name, Arrays.asList("201701")); + sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); + t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); Assert.assertNull(t); - sharedCache.alterPartitionInCache(dbName, tbl1Name, Arrays.asList("201701"), newPart1); - t = sharedCache.getPartitionFromCache(dbName, tbl1Name, Arrays.asList("201701")); + sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1); + t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1new"); } @@ -645,7 +659,10 @@ public void testAggrStatsRepeatedRead() throws Exception { String tblName = "tbl"; String colName = "f1"; - Database db = new Database(dbName, null, "some_location", null); + Database db = new DatabaseBuilder() + .setName(dbName) + .setLocation("some_location") + .build(conf); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -659,6 +676,7 @@ public void testAggrStatsRepeatedRead() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -668,9 +686,11 @@ public void testAggrStatsRepeatedRead() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -699,9 +719,9 @@ public void testAggrStatsRepeatedRead() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = 
cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); } @@ -712,6 +732,7 @@ public void testPartitionAggrStats() throws Exception { String colName = "f1"; Database db = new Database(dbName, null, "some_location", null); + db.setCatalogName(DEFAULT_CATALOG_NAME); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -725,6 +746,7 @@ public void testPartitionAggrStats() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -734,9 +756,11 @@ public void testPartitionAggrStats() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -767,10 +791,10 @@ public void testPartitionAggrStats() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); } @@ -782,6 +806,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { String colName = "f1"; Database db = new Database(dbName, null, "some_location", null); + db.setCatalogName(DEFAULT_CATALOG_NAME); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -795,6 +820,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -804,9 +830,11 @@ public void testPartitionAggrStatsBitVector() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -851,10 +879,10 @@ public void testPartitionAggrStatsBitVector() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats 
aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); } @@ -885,7 +913,7 @@ public Object call() { } executor.invokeAll(tasks); for (String dbName : dbNames) { - Database db = sharedCache.getDatabaseFromCache(dbName); + Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); Assert.assertNotNull(db); Assert.assertEquals(dbName, db.getName()); } @@ -906,7 +934,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); return null; } }; @@ -914,7 +942,7 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -923,14 +951,14 @@ public Object call() { List ptnVals = new ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); return null; } }; @@ -940,7 +968,7 @@ public Object call() { executor.invokeAll(tasks); for (String tblName : tblNames) { for (String ptnVal : ptnVals) { - Partition ptn = sharedCache.getPartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); Assert.assertNotNull(ptn); Assert.assertEquals(tblName, ptn.getTableName()); Assert.assertEquals(tblName, ptn.getTableName()); @@ -957,7 +985,7 @@ public Object call() { for (String ptnVal : ptnVals) { Callable c = new Callable() { public Object call() { - sharedCache.removePartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); return null; } }; @@ -965,14 +993,14 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); for (String ptnVal : newPtnVals) { Map 
partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); return null; } }; @@ -982,7 +1010,7 @@ public Object call() { executor.invokeAll(tasks); for (String tblName : addPtnTblNames) { for (String ptnVal : newPtnVals) { - Partition ptn = sharedCache.getPartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); Assert.assertNotNull(ptn); Assert.assertEquals(tblName, ptn.getTableName()); Assert.assertEquals(tblName, ptn.getTableName()); @@ -990,7 +1018,7 @@ public Object call() { } } for (String tblName : dropPtnTblNames) { - List ptns = sharedCache.listCachedPartitions(dbNames.get(0), tblName, 100); + List ptns = sharedCache.listCachedPartitions(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, 100); Assert.assertEquals(0, ptns.size()); } sharedCache.getDatabaseCache().clear(); @@ -1005,6 +1033,7 @@ private Database createTestDb(String dbName, String dbOwner) { Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); db.setOwnerType(PrincipalType.USER); + db.setCatalogName(DEFAULT_CATALOG_NAME); return db; } @@ -1019,6 +1048,7 @@ private Table createTestTbl(String dbName, String tblName, String tblOwner, sd.setStoredAsSubDirectories(false); Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); return tbl; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java new file mode 100644 index 0000000000..423dce8a68 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.cache; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.util.Comparator; +import java.util.List; + +/** + * Tests that catalogs are properly cached. + */ +@Category(MetastoreCheckinTest.class) +public class TestCatalogCaching { + private static final String CAT1_NAME = "cat1"; + private static final String CAT2_NAME = "cat2"; + + private ObjectStore objectStore; + private Configuration conf; + private CachedStore cachedStore; + + @Before + public void createObjectStore() throws MetaException, InvalidOperationException { + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetaStoreTestUtils.setConfForStandloneMode(conf); + objectStore = new ObjectStore(); + objectStore.setConf(conf); + + // Create three catalogs + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); + + Catalog cat1 = new CatalogBuilder() + .setName(CAT1_NAME) + .setLocation("/tmp/cat1") + .build(); + objectStore.createCatalog(cat1); + Catalog cat2 = new CatalogBuilder() + .setName(CAT2_NAME) + .setLocation("/tmp/cat2") + .build(); + objectStore.createCatalog(cat2); + } + + @After + public void clearCatalogCache() throws MetaException, NoSuchObjectException { + List catalogs = objectStore.getCatalogs(); + for (String catalog : catalogs) objectStore.dropCatalog(catalog); + } + + @Test + public void defaultHiveOnly() throws Exception { + // By default just the Hive catalog should be cached. + cachedStore = new CachedStore(); + cachedStore.setConf(conf); + CachedStore.stopCacheUpdateService(1); + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // Only the hive catalog should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(1, cachedCatalogs.size()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0)); + } + + @Test + public void cacheAll() throws Exception { + // Set the config value to empty string, which should result in all catalogs being cached. 
+ Configuration newConf = new Configuration(conf); + MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, ""); + cachedStore = new CachedStore(); + cachedStore.setConf(newConf); + CachedStore.stopCacheUpdateService(1); + objectStore.setConf(newConf); // have to override it with the new conf since this is where + // prewarm gets the conf object + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // All the catalogs should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(3, cachedCatalogs.size()); + cachedCatalogs.sort(Comparator.naturalOrder()); + Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); + Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2)); + } + + @Test + public void cacheSome() throws Exception { + // Set the config value to 2 catalogs other than hive + Configuration newConf = new Configuration(conf); + MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, CAT1_NAME + "," + CAT2_NAME); + cachedStore = new CachedStore(); + cachedStore.setConf(newConf); + CachedStore.stopCacheUpdateService(1); + objectStore.setConf(newConf); // have to override it with the new conf since this is where + // prewarm gets the conf object + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // Only the two configured catalogs should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(2, cachedCatalogs.size()); + cachedCatalogs.sort(Comparator.naturalOrder()); + Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); + Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java index 84c187bad6..1a57df2680 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java @@ -87,6 +87,7 @@ private MetaStoreFactoryForTests() {} // Create Embedded MetaStore conf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db1;create=true"); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, false); AbstractMetaStoreService embedded = new MiniHMS.Builder() .setConf(conf) @@ -97,6 +98,7 @@ private MetaStoreFactoryForTests() {} // Create Remote MetaStore conf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db2;create=true"); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, true); AbstractMetaStoreService remote = new MiniHMS.Builder() .setConf(conf) diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java index 4d9cb1b33b..8555eee354 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java @@ -19,12 +19,16 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.fs.Path; import
org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -34,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -81,10 +86,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(DB_NAME). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } @After @@ -123,6 +127,72 @@ public void testAddPartitionTwoValues() throws Exception { } @Test + public void addPartitionOtherCatalog() throws TException { + String catName = "add_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "add_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partition(parts[0]); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); + client.add_partitions(Arrays.asList(parts), true, false); + + for (int i = 0; i < parts.length; i++) { + Partition fetched = client.getPartition(catName, dbName, tableName, + Collections.singletonList("a" + i)); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals(dbName, fetched.getDbName()); + Assert.assertEquals(tableName, fetched.getTableName()); + } + + client.dropDatabase(catName, dbName, true, true, true); + client.dropCatalog(catName); + } + + @Test(expected = InvalidObjectException.class) + public void noSuchCatalog() throws TException { + String tableName = "table_for_no_such_catalog"; + Table table = new TableBuilder() + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition part = new PartitionBuilder() + .inTable(table) + .addValue("a") + .build(metaStore.getConf()); + // Explicitly mis-set the catalog name + part.setCatName("nosuch"); + client.add_partition(part); + } + + @Test public void testAddPartitionWithDefaultAttributes() throws Exception { Table table = createTable(); @@ -134,7 +204,7 
@@ public void testAddPartitionWithDefaultAttributes() throws Exception { .setCols(getYearPartCol()) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); @@ -270,7 +340,7 @@ public void testAddPartitionEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - partition.getSd().setCols(new ArrayList()); + partition.getSd().setCols(new ArrayList<>()); client.add_partition(partition); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the @@ -372,8 +442,7 @@ public void testAddPartitionForView() throws Exception { .addCol("test_value", DEFAULT_COL_TYPE, "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); client.add_partition(partition); } @@ -427,8 +496,7 @@ public void testAddPartitionNoPartColOnTable() throws Exception { .setTableName(TABLE_NAME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); - client.createTable(origTable); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); client.add_partition(partition); } @@ -442,7 +510,7 @@ public void testAddPartitionNoColInPartition() throws Exception { .setTableName(TABLE_NAME) .addValue(DEFAULT_YEAR_VALUE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); } @@ -455,7 +523,7 @@ public void testAddPartitionDifferentNamesAndTypesInColAndTableCol() throws Exce .setTableName(TABLE_NAME) .addValue("1000") .addCol("time", "int") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=1000"); @@ -474,7 +542,7 @@ public void testAddPartitionNoValueInPartition() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); } @@ -588,7 +656,7 @@ public void testAddPartitionsWithDefaultAttributes() throws Exception { .setCols(getYearPartCol()) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); client.add_partitions(Lists.newArrayList(partition)); @@ -622,7 +690,7 @@ public void testAddPartitionsNullList() throws Exception { @Test public void testAddPartitionsEmptyList() throws Exception { - client.add_partitions(new ArrayList()); + client.add_partitions(new ArrayList<>()); } @Test(expected = MetaException.class) @@ -873,7 +941,7 @@ public void testAddPartitionsEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - partition.getSd().setCols(new ArrayList()); + partition.getSd().setCols(new ArrayList<>()); client.add_partitions(Lists.newArrayList(partition)); // TODO: Not sure that this is the correct behavior. 
It doesn't make sense to create the @@ -976,8 +1044,7 @@ public void testAddPartitionsForView() throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); List partitions = Lists.newArrayList(partition); client.add_partitions(partitions); @@ -1044,7 +1111,7 @@ public void testAddPartitionsNoValueInPartition() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); List partitions = new ArrayList<>(); partitions.add(partition); client.add_partitions(partitions); @@ -1160,7 +1227,7 @@ public void testAddPartsNullList() throws Exception { public void testAddPartsEmptyList() throws Exception { List addedPartitions = - client.add_partitions(new ArrayList(), false, true); + client.add_partitions(new ArrayList<>(), false, true); Assert.assertNotNull(addedPartitions); Assert.assertTrue(addedPartitions.isEmpty()); } @@ -1276,8 +1343,7 @@ public void testAddPartsNullPartition() throws Exception { // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder().setName(dbName).build(); - client.createDatabase(db); + new DatabaseBuilder().setName(dbName).create(client, metaStore.getConf()); } private Table createTable() throws Exception { @@ -1302,13 +1368,12 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } private void createExternalTable(String tableName, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(DB_NAME) .setTableName(tableName) .addCol("test_id", "int", "test col id") @@ -1316,8 +1381,7 @@ private void createExternalTable(String tableName, String location) throws Excep .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .addTableParam("EXTERNAL", "TRUE") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); } private Partition buildPartition(String dbName, String tableName, String value) @@ -1337,7 +1401,7 @@ private Partition buildPartition(String dbName, String tableName, String value, .addCol("test_value", "string", "test col value") .addPartParam(DEFAULT_PARAM_KEY, DEFAULT_PARAM_VALUE) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } @@ -1357,7 +1421,7 @@ private Partition buildPartition(List values, List partCols .setLastAccessTime(123456) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java index 1122057a40..b32954ffd7 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java @@ 
-88,10 +88,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(DB_NAME). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } @After @@ -169,6 +168,8 @@ public void testAddPartitionSpecsMultipleValues() throws Exception { verifyPartitionSharedSD(table, "year=2005/month=may", Lists.newArrayList("2005", "may"), 4); } + // TODO add tests for partitions in other catalogs + @Test(expected = NullPointerException.class) public void testAddPartitionSpecNullSpec() throws Exception { @@ -679,8 +680,7 @@ public void testAddPartitionSpecForView() throws Exception { .addCol("test_value", DEFAULT_COL_TYPE, "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = @@ -714,7 +714,7 @@ public void testAddPartitionSpecNoValue() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addpartspectest") - .build(); + .build(metaStore.getConf()); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); @@ -821,8 +821,7 @@ public void testAddPartitionSpecOneInvalid() throws Exception { // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder().setName(dbName).build(); - client.createDatabase(db); + Database db = new DatabaseBuilder().setName(dbName).create(client, metaStore.getConf()); } private Table createTable() throws Exception { @@ -844,8 +843,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } @@ -866,7 +864,7 @@ private Partition buildPartition(String dbName, String tableName, String value, .addCol("test_value", "string", "test col value") .addPartParam(DEFAULT_PARAM_KEY, DEFAULT_PARAM_VALUE) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } @@ -886,7 +884,7 @@ private Partition buildPartition(List values, List partCols .setLastAccessTime(DEFAULT_CREATE_TIME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 747f66da58..770da1a5cf 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -19,11 +19,15 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import 
org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -31,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -41,6 +46,8 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -48,6 +55,7 @@ import org.junit.runners.Parameterized; import static java.util.stream.Collectors.joining; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -60,7 +68,7 @@ @RunWith(Parameterized.class) @Category(MetastoreCheckinTest.class) public class TestAlterPartitions extends MetaStoreClientTest { - public static final int NEW_CREATE_TIME = 123456789; + private static final int NEW_CREATE_TIME = 123456789; private AbstractMetaStoreService metaStore; private IMetaStoreClient client; @@ -95,13 +103,12 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) throws Exception { TableBuilder builder = new TableBuilder() @@ -111,7 +118,7 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -121,14 +128,14 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List values) + private void addPartition(IMetaStoreClient client, Table table, List values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static List> createTable4PartColsParts(IMetaStoreClient client) throws + private List> createTable4PartColsParts(IMetaStoreClient client) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, PARTCOL_SCHEMA, false); List> testValues = Lists.newArrayList( @@ -197,7 +204,6 @@ private void assertPartitionChanged(Partition partition, List testValues /** * Testing alter_partition(String,String,Partition) -> * alter_partition_with_environment_context(String,String,Partition,null). 
- * @throws Exception */ @Test public void testAlterPartition() throws Exception { @@ -217,12 +223,152 @@ public void testAlterPartition() throws Exception { } + @Test + public void otherCatalog() throws TException { + String catName = "alter_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "alter_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(catName, dbName, tableName, newPart); + + Partition fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere")); + client.alter_partitions(catName, dbName, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(catName, dbName, tableName, newPart, ec); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a4")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + + client.dropDatabase(catName, dbName, true, true, true); + client.dropCatalog(catName); + } + + @SuppressWarnings("deprecation") + @Test + public void deprecatedCalls() throws TException { + String tableName = "deprecated_table"; + Table table = new TableBuilder() + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new 
PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i)) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart); + + Partition fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation("somewhere"); + client.alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3")); + newPart.setValues(Collections.singletonList("b3")); + client.renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3")); + Assert.assertEquals(1, fetched.getValuesSize()); + Assert.assertEquals("b3", fetched.getValues().get(0)); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + } + @Test(expected = InvalidOperationException.class) public void testAlterPartitionUnknownPartition() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @@ -231,7 +377,7 @@ public void testAlterPartitionIncompletePartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @@ -240,11 +386,18 @@ public void testAlterPartitionMissingPartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = 
builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); + client.alter_partition("nosuch", DB_NAME, TABLE_NAME, partitions.get(3)); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionNoDbName() throws Exception { createTable4PartColsParts(client); List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); @@ -315,7 +468,6 @@ public void testAlterPartitionChangeValues() throws Exception { /** * Testing alter_partition(String,String,Partition,EnvironmentContext) -> * alter_partition_with_environment_context(String,String,Partition,EnvironmentContext). - * @throws Exception */ @Test public void testAlterPartitionWithEnvironmentCtx() throws Exception { @@ -349,7 +501,7 @@ public void testAlterPartitionWithEnvironmentCtxUnknownPartition() throws Except createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); } @@ -358,7 +510,7 @@ public void testAlterPartitionWithEnvironmentCtxIncompletePartitionVals() throws createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); } @@ -367,7 +519,7 @@ public void testAlterPartitionWithEnvironmentCtxMissingPartitionVals() throws Ex createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); }
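The bogus-catalog cases above exercise the new catalog-qualified alter overloads added by this patch. A condensed happy-path sketch, reusing the catName, dbName and tableName fixtures built in the otherCatalog test earlier in this file:

    // Fetch a partition through its catalog, modify it, and write it back.
    Partition p = client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
    p.getParameters().put("test_key", "test_value");
    client.alter_partition(catName, dbName, tableName, p);

    // The batched variant takes the same catalog qualification.
    client.alter_partitions(catName, dbName, tableName, Arrays.asList(p));

@@ -444,7 +596,6 @@ public void testAlterPartitionWithEnvironmentCtxChangeValues() throws Exception * Testing * alter_partitions(String,String,List(Partition)) -> * alter_partitions_with_environment_context(String,String,List(Partition),null).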
- * @throws Exception */ @Test public void testAlterPartitions() throws Exception { @@ -478,7 +629,7 @@ public void testAlterPartitionsUnknownPartition() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0); makeTestChangesOnPartition(part1); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); @@ -494,7 +645,7 @@ public void testAlterPartitionsIncompletePartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); } @@ -504,12 +655,19 @@ public void testAlterPartitionsMissingPartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionsBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); + client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part)); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionsNoDbName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -596,7 +754,6 @@ public void testAlterPartitionsChangeValues() throws Exception { * Testing * alter_partitions(String,String,List(Partition),EnvironmentContext) -> * alter_partitions_with_environment_context(String,String,List(Partition),EnvironmentContext). 
- * @throws Exception */ @Test public void testAlterPartitionsWithEnvironmentCtx() throws Exception { @@ -642,7 +799,7 @@ public void testAlterPartitionsWithEnvironmentCtxUnknownPartition() throws Excep createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); @@ -653,7 +810,7 @@ public void testAlterPartitionsWithEnvironmentCtxIncompletePartitionVals() throw createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); @@ -664,13 +821,20 @@ public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws E createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); + client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext()); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionsWithEnvironmentCtxNoDbName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -757,7 +921,6 @@ public void testAlterPartitionsWithEnvironmentCtxChangeValues() throws Exception * Testing * renamePartition(String,String,List(String),Partition) -> * renamePartition(String,String,List(String),Partition). 
- * @throws Exception */ @Test public void testRenamePartition() throws Exception { @@ -870,6 +1033,16 @@ public void testRenamePartitionNullNewPart() throws Exception { } @Test(expected = InvalidOperationException.class) + public void testRenamePartitionBogusCatalogName() throws Exception { + List> oldValues = createTable4PartColsParts(client); + List oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); + + Partition partToRename = oldParts.get(3); + partToRename.setValues(Lists.newArrayList("2018", "01", "16")); + client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename); + } + + @Test(expected = InvalidOperationException.class) public void testRenamePartitionNoDbName() throws Exception { List> oldValues = createTable4PartColsParts(client); List oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java index b67f33df7b..30099e082d 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -27,9 +28,11 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -37,10 +40,12 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -79,10 +84,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(DB_NAME) - .build(); - client.createDatabase(db); + .create(client, metaStore.getConf()); tableWithPartitions = createTableWithPartitions(); externalTable = createExternalTable(); @@ -222,11 +226,11 @@ public void testAppendPartitionNullTable() throws Exception { client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues); } - @Test(expected = MetaException.class) + @Test(expected = InvalidObjectException.class) public void testAppendPartitionEmptyPartValues() throws 
Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList()); + client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>()); } @Test @@ -236,7 +240,7 @@ public void testAppendPartitionNullPartValues() throws Exception { Table table = tableWithPartitions; client.appendPartition(table.getDbName(), table.getTableName(), (List<String>) null); Assert.fail("Exception should have been thrown."); - } catch (TTransportException | NullPointerException e) { + } catch (TTransportException | InvalidObjectException e) { // TODO: NPE should not be thrown } } @@ -442,6 +446,57 @@ public void testAppendPartWrongColumnInPartName() throws Exception { client.appendPartition(table.getDbName(), table.getTableName(), partitionName); } + @Test + public void otherCatalog() throws TException { + String catName = "append_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "append_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition created = + client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(1, created.getValuesSize()); + Assert.assertEquals("a1", created.getValues().get(0)); + Partition fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(created, fetched); + + created = client.appendPartition(catName, dbName, tableName, "partcol=a2"); + Assert.assertEquals(1, created.getValuesSize()); + Assert.assertEquals("a2", created.getValues().get(0)); + fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + Assert.assertEquals(created, fetched); + } + + @Test(expected = InvalidObjectException.class) + public void testAppendPartitionBogusCatalog() throws Exception { + client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), + Lists.newArrayList("2017", "may")); + } + + @Test(expected = InvalidObjectException.class) + public void testAppendPartitionByNameBogusCatalog() throws Exception { + client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), + "year=2017/month=april"); + } + // Helper methods private Table createTableWithPartitions() throws Exception { @@ -477,7 +532,7 @@ private Table createView() throws Exception { private Table createTable(String tableName, List<FieldSchema> partCols, Map<String, String> tableParams, String tableType, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(DB_NAME) .setTableName(tableName) .addCol("test_id", "int", "test col id") @@ -486,17 +541,15 @@ private Table createTable(String tableName, List partCols, Map values) throws Exception { - Partition partition = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .setValues(values) - .build(); - client.add_partition(partition); + .addToTable(client, metaStore.getConf()); } private static List<FieldSchema> getYearAndMonthPartCols() {
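Both appendPartition forms, by value list and by "partcol=value" name string, gain catalog-qualified variants here. A condensed sketch, reusing the catName, dbName and tableName fixtures from the otherCatalog test above:

    // Append by explicit partition values; the created Partition is returned.
    Partition byValues = client.appendPartition(catName, dbName, tableName,
        Collections.singletonList("a1"));

    // Append by partition name string; equivalent, but the values are parsed from "partcol=a2".
    Partition byName = client.appendPartition(catName, dbName, tableName, "partcol=a2");

diff --git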
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java new file mode 100644 index 0000000000..92db489849 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java @@ -0,0 +1,215 @@ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestCatalogs extends MetaStoreClientTest { + private static final Logger LOG = LoggerFactory.getLogger(TestCatalogs.class); + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + + public TestCatalogs(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + } + + @After + public void tearDown() throws Exception { + // Drop any left over catalogs + List catalogs = client.getCatalogs(); + for (String catName : catalogs) { + if (!catName.equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME)) { + // First drop any databases in catalog + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, false, true); + } + client.dropCatalog(catName); + } else { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + if (!db.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + client.dropDatabase(catName, db, true, false, true); + } + } + + } + } + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void catalogOperations() throws TException { + String[] catNames = {"cat1", "cat2", "ADifferentName"}; + String[] description = {"a description", "super descriptive", null}; + String[] location = {MetaStoreTestUtils.getTestWarehouseDir("cat1"), + MetaStoreTestUtils.getTestWarehouseDir("cat2"), + MetaStoreTestUtils.getTestWarehouseDir("different")}; + + for (int i = 0; i < catNames.length; i++) { + Catalog cat = new CatalogBuilder() + .setName(catNames[i]) + .setLocation(location[i]) + .setDescription(description[i]) + .build(); + client.createCatalog(cat); + File dir = new File(cat.getLocationUri()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + } + + for (int i = 0; i < catNames.length; i++) { + Catalog cat = client.getCatalog(catNames[i]); + Assert.assertTrue(catNames[i].equalsIgnoreCase(cat.getName())); + Assert.assertEquals(description[i], cat.getDescription()); + Assert.assertEquals(location[i], cat.getLocationUri()); + File dir = new File(cat.getLocationUri()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + // Make sure there's a default database associated with each catalog + Database db = client.getDatabase(catNames[i], DEFAULT_DATABASE_NAME); + Assert.assertEquals("file:" + cat.getLocationUri(), db.getLocationUri()); + } + + List catalogs = client.getCatalogs(); + Assert.assertEquals(4, catalogs.size()); + catalogs.sort(Comparator.naturalOrder()); + List expected = new ArrayList<>(catNames.length + 1); + expected.add(Warehouse.DEFAULT_CATALOG_NAME); + expected.addAll(Arrays.asList(catNames)); + expected.sort(Comparator.naturalOrder()); + for (int i = 0; i < catalogs.size(); i++) { + Assert.assertTrue("Expected " + expected.get(i) + " actual " + catalogs.get(i), + catalogs.get(i).equalsIgnoreCase(expected.get(i))); + } + + for (int i = 0; i < catNames.length; i++) { + client.dropCatalog(catNames[i]); + File dir = new 
File(location[i]); + Assert.assertFalse(dir.exists()); + } + + catalogs = client.getCatalogs(); + Assert.assertEquals(1, catalogs.size()); + Assert.assertTrue(catalogs.get(0).equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME)); + } + + @Test(expected = NoSuchObjectException.class) + public void getNonExistentCatalog() throws TException { + client.getCatalog("noSuchCatalog"); + } + + @Test(expected = MetaException.class) + public void createCatalogWithBadLocation() throws TException { + Catalog cat = new CatalogBuilder() + .setName("goodluck") + .setLocation("/nosuchdir/nosuch") + .build(); + client.createCatalog(cat); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNonExistentCatalog() throws TException { + client.dropCatalog("noSuchCatalog"); + } + + @Test(expected = MetaException.class) + public void dropHiveCatalog() throws TException { + client.dropCatalog(Warehouse.DEFAULT_CATALOG_NAME); + } + + @Test(expected = InvalidOperationException.class) + public void dropNonEmptyCatalog() throws TException { + String catName = "toBeDropped"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "dontDropMe"; + new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + client.dropCatalog(catName); + } + + @Test(expected = InvalidOperationException.class) + public void dropCatalogWithNonEmptyDefaultDb() throws TException { + String catName = "toBeDropped2"; + new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .create(client); + + new TableBuilder() + .setTableName("not_droppable") + .setCatName(catName) + .addCol("cola1", "bigint") + .create(client, metaStore.getConf()); + + client.dropCatalog(catName); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java new file mode 100644 index 0000000000..7733b2d585 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCheckConstraint.java @@ -0,0 +1,360 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLCheckConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestCheckConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestCheckConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the 
catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + CheckConstraintsRequest rqst = + new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setCheckExpression("= 5") + .build(metaStore.getConf()); + client.addCheckConstraint(cc); + + rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getCheckConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("= 5", fetched.get(0).getCheck_expression()); + Assert.assertEquals(table.getTableName() + "_check_constraint", fetched.get(0).getDc_name()); + String table0PkName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addCheckConstraint(cc); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "occc"; + // Table in non 'hive' catalog + List cc = new SQLCheckConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .setCheckExpression("like s%") + .build(metaStore.getConf()); + client.addCheckConstraint(cc); + + CheckConstraintsRequest rqst = new CheckConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getCheckConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + 
Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("like s%", fetched.get(0).getCheck_expression()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new CheckConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwccc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .setCheckExpression("> 0") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, null, cc); + CheckConstraintsRequest rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getCheckConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("> 0", fetched.get(0).getCheck_expression()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setCheckExpression("> 0") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, null, cc); + CheckConstraintsRequest rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getCheckConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + 
Assert.assertEquals("> 0", fetched.get(0).getCheck_expression()); + Assert.assertEquals(table.getTableName() + "_check_constraint", fetched.get(0).getDc_name()); + String tablePkName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + CheckConstraintsRequest rqst = + new CheckConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getCheckConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setCheckExpression("> 0") + .build(metaStore.getConf()); + client.addCheckConstraint(cc); + + try { + cc = new SQLCheckConstraintBuilder() + .onTable(table) + .addColumn("col2") + .setCheckExpression("= 'this string intentionally left empty'") + .build(metaStore.getConf()); + client.addCheckConstraint(cc); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List cc = new SQLCheckConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .setCheckExpression("= 'this string intentionally left empty'") + .build(metaStore.getConf()); + client.addCheckConstraint(cc); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + CheckConstraintsRequest rqst = + new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List cc = client.getCheckConstraints(rqst); + Assert.assertTrue(cc.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + CheckConstraintsRequest rqst = + new CheckConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List cc = client.getCheckConstraints(rqst); + Assert.assertTrue(cc.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + CheckConstraintsRequest rqst = new CheckConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List cc = client.getCheckConstraints(rqst); + Assert.assertTrue(cc.isEmpty()); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java index f2d745eaad..24e3c5667a 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java @@ -20,8 +20,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.Warehouse; import 
org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -30,10 +33,13 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -43,12 +49,20 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; /** * Test class for IMetaStoreClient API. Testing the Database related functions. */ + @RunWith(Parameterized.class) @Category(MetastoreCheckinTest.class) public class TestDatabases extends MetaStoreClientTest { @@ -74,17 +88,16 @@ public void setUp() throws Exception { } testDatabases[0] = - new DatabaseBuilder().setName("test_database_1").build(); + new DatabaseBuilder().setName("test_database_1").create(client, metaStore.getConf()); testDatabases[1] = - new DatabaseBuilder().setName("test_database_to_find_1").build(); + new DatabaseBuilder().setName("test_database_to_find_1").create(client, metaStore.getConf()); testDatabases[2] = - new DatabaseBuilder().setName("test_database_to_find_2").build(); + new DatabaseBuilder().setName("test_database_to_find_2").create(client, metaStore.getConf()); testDatabases[3] = - new DatabaseBuilder().setName("test_database_hidden_1").build(); + new DatabaseBuilder().setName("test_database_hidden_1").create(client, metaStore.getConf()); // Create the databases, and reload them from the MetaStore - for(int i=0; i < testDatabases.length; i++) { - client.createDatabase(testDatabases[i]); + for (int i=0; i < testDatabases.length; i++) { testDatabases[i] = client.getDatabase(testDatabases[i].getName()); } } @@ -102,7 +115,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a database and then drops it. Good for testing the happy path. 
- * @throws Exception */ @Test public void testCreateGetDeleteDatabase() throws Exception { @@ -127,10 +139,10 @@ public void testCreateGetDeleteDatabase() throws Exception { @Test public void testCreateDatabaseDefaultValues() throws Exception { - Database database = new Database(); - database.setName("dummy"); + Database database = new DatabaseBuilder() + .setName("dummy") + .create(client, metaStore.getConf()); - client.createDatabase(database); Database createdDatabase = client.getDatabase(database.getName()); Assert.assertNull("Comparing description", createdDatabase.getDescription()); @@ -139,7 +151,8 @@ public void testCreateDatabaseDefaultValues() throws Exception { Assert.assertEquals("Comparing parameters", new HashMap(), createdDatabase.getParameters()); Assert.assertNull("Comparing privileges", createdDatabase.getPrivileges()); - Assert.assertNull("Comparing owner name", createdDatabase.getOwnerName()); + Assert.assertEquals("Comparing owner name", SecurityUtils.getUser(), + createdDatabase.getOwnerName()); Assert.assertEquals("Comparing owner type", PrincipalType.USER, createdDatabase.getOwnerType()); } @@ -280,7 +293,7 @@ public void testDropDatabaseCaseInsensitive() throws Exception { @Test public void testDropDatabaseDeleteData() throws Exception { Database database = testDatabases[0]; - Path dataFile = new Path(database.getLocationUri().toString() + "/dataFile"); + Path dataFile = new Path(database.getLocationUri() + "/dataFile"); metaStore.createFile(dataFile, "100"); // Do not delete the data @@ -318,8 +331,7 @@ public void testDropDatabaseWithTable() throws Exception { .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .build(); - client.createTable(testTable); + .create(client, metaStore.getConf()); client.dropDatabase(database.getName(), true, true, false); } @@ -332,8 +344,7 @@ public void testDropDatabaseWithTableCascade() throws Exception { .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .build(); - client.createTable(testTable); + .create(client, metaStore.getConf()); client.dropDatabase(database.getName(), true, true, true); Assert.assertFalse("The directory should be removed", @@ -349,9 +360,8 @@ public void testDropDatabaseWithFunction() throws Exception { .setDbName(database.getName()) .setName("test_function") .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper") - .build(); + .create(client, metaStore.getConf()); - client.createFunction(testFunction); client.dropDatabase(database.getName(), true, true, false); } @@ -365,16 +375,14 @@ public void testDropDatabaseWithFunctionCascade() throws Exception { .setDbName(database.getName()) .setName("test_function") .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper") - .build(); + .create(client, metaStore.getConf()); - client.createFunction(testFunction); client.dropDatabase(database.getName(), true, true, true); Assert.assertFalse("The directory should be removed", metaStore.isPathExists(new Path(database.getLocationUri()))); } - @Test public void testGetAllDatabases() throws Exception { List allDatabases = client.getAllDatabases(); @@ -446,7 +454,7 @@ public void testAlterDatabase() throws Exception { .setDescription("dummy description 2") .addParam("param_key_1", "param_value_1_2") .addParam("param_key_2_3", "param_value_2_3") - .build(); + .build(metaStore.getConf()); client.alterDatabase(originalDatabase.getName(), newDatabase); Database alteredDatabase = client.getDatabase(newDatabase.getName()); @@ -460,6 
+468,7 @@ public void testAlterDatabaseNotNullableFields() throws Exception { Database originalDatabase = client.getDatabase(database.getName()); Database newDatabase = new Database(); newDatabase.setName("new_name"); + newDatabase.setCatalogName(DEFAULT_CATALOG_NAME); client.alterDatabase(originalDatabase.getName(), newDatabase); // The name should not be changed, so reload the db with the original name @@ -480,7 +489,9 @@ public void testAlterDatabaseNotNullableFields() throws Exception { @Test(expected = NoSuchObjectException.class) public void testAlterDatabaseNoSuchDatabase() throws Exception { - Database newDatabase = new DatabaseBuilder().setName("test_database_altered").build(); + Database newDatabase = new DatabaseBuilder() + .setName("test_database_altered") + .build(metaStore.getConf()); client.alterDatabase("no_such_database", newDatabase); } @@ -505,6 +516,131 @@ public void testAlterDatabaseCaseInsensitive() throws Exception { Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase); } + @Test + public void databasesInCatalogs() throws TException, URISyntaxException { + String catName = "mycatalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String[] dbNames = {"db1", "db9"}; + Database[] dbs = new Database[2]; + // For this one don't specify a location to make sure it gets put in the catalog directory + dbs[0] = new DatabaseBuilder() + .setName(dbNames[0]) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + // For the second one, explicitly set a location to make sure it ends up in the specified place. + String db1Location = MetaStoreTestUtils.getTestWarehouseDir(dbNames[1]); + dbs[1] = new DatabaseBuilder() + .setName(dbNames[1]) + .setCatalogName(catName) + .setLocation(db1Location) + .create(client, metaStore.getConf()); + + Database fetched = client.getDatabase(catName, dbNames[0]); + String expectedLocation = new File(cat.getLocationUri(), dbNames[0] + ".db").toURI().toString(); + Assert.assertEquals(expectedLocation, fetched.getLocationUri() + "/"); + String db0Location = new URI(fetched.getLocationUri()).getPath(); + File dir = new File(db0Location); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + fetched = client.getDatabase(catName, dbNames[1]); + Assert.assertEquals(new File(db1Location).toURI().toString(), fetched.getLocationUri() + "/"); + dir = new File(new URI(fetched.getLocationUri()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + Set<String> fetchedDbs = new HashSet<>(client.getAllDatabases(catName)); + Assert.assertEquals(3, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getAllDatabases()); + Assert.assertEquals(5, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME));
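The databasesInCatalogs test above is the core coverage for catalog-scoped database listing: getAllDatabases(catName) and getDatabases(catName, pattern) are confined to one catalog, while the old single-argument forms are routed to the default catalog. A small sketch of the distinction, reusing catName and dbNames from the test; the counts in the comments assume the same fixture databases:

    // Scoped to the new catalog: its own default database plus db1 and db9.
    Set<String> inCat = new HashSet<>(client.getAllDatabases(catName));

    // Pattern matching inside one catalog.
    Set<String> matched = new HashSet<>(client.getDatabases(catName, "d*"));

    // Legacy form: implicitly the default catalog, so "d*" only matches "default".
    Set<String> legacy = new HashSet<>(client.getDatabases("d*"));

+ + // Intentionally using the deprecated method to make sure it returns correct results.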
+ fetchedDbs = new HashSet<>(client.getAllDatabases()); + Assert.assertEquals(5, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "d*")); + Assert.assertEquals(3, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getDatabases("d*")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + // Intentionally using the deprecated method to make sure it returns correct results. + fetchedDbs = new HashSet<>(client.getDatabases("d*")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "*1")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(dbNames[0])); + + fetchedDbs = new HashSet<>(client.getDatabases("*9")); + Assert.assertEquals(0, fetchedDbs.size()); + + // Intentionally using the deprecated method to make sure it returns correct results. + fetchedDbs = new HashSet<>(client.getDatabases("*9")); + Assert.assertEquals(0, fetchedDbs.size()); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "*x")); + Assert.assertEquals(0, fetchedDbs.size()); + + // Check that dropping database from wrong catalog fails + try { + client.dropDatabase(dbNames[0], true, false, false); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // Check that dropping database from wrong catalog fails + try { + // Intentionally using deprecated method + client.dropDatabase(dbNames[0], true, false, false); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // Drop them from the proper catalog + client.dropDatabase(catName, dbNames[0], true, false, false); + dir = new File(db0Location); + Assert.assertFalse(dir.exists()); + + client.dropDatabase(catName, dbNames[1], true, false, false); + dir = new File(db1Location); + Assert.assertFalse(dir.exists()); + + fetchedDbs = new HashSet<>(client.getAllDatabases(catName)); + Assert.assertEquals(1, fetchedDbs.size()); + } + + @Test(expected = InvalidObjectException.class) + public void createDatabaseInNonExistentCatalog() throws TException { + Database db = new DatabaseBuilder() + .setName("doomed") + .setCatalogName("nosuch") + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void fetchDatabaseInNonExistentCatalog() throws TException { + client.getDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME); + } + + @Test(expected = NoSuchObjectException.class) + public void dropDatabaseInNonExistentCatalog() throws TException { + client.dropDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME, true, false, false); + } + private Database getDatabaseWithAllParametersSet() throws Exception { return new DatabaseBuilder() .setName("dummy") @@ -514,6 +650,6 @@ private Database getDatabaseWithAllParametersSet() throws Exception { .setDescription("dummy description") .addParam("param_key_1", "param_value_1") .addParam("param_key_2", "param_value_2") - .build(); + .build(metaStore.getConf()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java new file mode 100644 index 0000000000..d56006be61 --- /dev/null +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java @@ -0,0 +1,360 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLDefaultConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestDefaultConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestDefaultConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put 
in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", fetched.get(0).getDc_name()); + String table0PkName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addDefaultConstraint(dv); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocdv"; + // Table in non 'hive' catalog + List dv = new SQLDefaultConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .setDefaultVal("empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, 
fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("empty", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcdv"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .setDefaultVal(0) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, dv, null); + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, dv, null); + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + 
Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", fetched.get(0).getDc_name()); + String tablePkName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + try { + dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col2") + .setDefaultVal("this string intentionally left empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List dv = new SQLDefaultConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .setDefaultVal("this string intentionally left empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java index d2ba4be7c0..9037001504 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java @@ -18,25 +18,31 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import 
java.util.Map; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -48,6 +54,8 @@ import com.google.common.collect.Lists; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + /** * Tests for dropping partitions. */ @@ -69,7 +77,7 @@ public static void startMetaStores() { Map msConf = new HashMap(); // Enable trash, so it can be tested - Map extraConf = new HashMap(); + Map extraConf = new HashMap<>(); extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) startMetaStores(msConf, extraConf); @@ -89,8 +97,7 @@ public void setUp() throws Exception { metaStore.cleanWarehouseDirs(); Database db = new DatabaseBuilder(). setName(DB_NAME). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); // Create test tables with 3 partitions createTable(TABLE_NAME, getYearAndMonthPartCols(), null); @@ -489,7 +496,71 @@ public void testDropPartitionByNameEmptyName() throws Exception { client.dropPartition(DB_NAME, TABLE_NAME, "", true); } - // Helper methods + @Test + public void otherCatalog() throws TException { + String catName = "drop_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "drop_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[2]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + List fetched = client.listPartitions(catName, dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + + Assert.assertTrue(client.dropPartition(catName, dbName, tableName, + Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false))); + try { + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + Assert.assertTrue(client.dropPartition(catName, dbName, tableName, "partcol=a1", true)); + try { + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + } + + @Test(expected = NoSuchObjectException.class) + public void testDropPartitionBogusCatalog() throws Exception { + client.dropPartition("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList("2017"), false); + } + + @Test(expected = NoSuchObjectException.class) + public void testDropPartitionByNameBogusCatalog() throws Exception { + client.dropPartition("nosuch", DB_NAME, TABLE_NAME, "year=2017", false); + } + + + // Helper methods private Table createTable(String tableName, List partCols, Map tableParams) throws Exception { @@ -501,36 +572,33 @@ private Table createTable(String tableName, List partCols, .setPartCols(partCols) .setLocation(metaStore.getWarehouseRoot() + "/" + tableName) .setTableParams(tableParams) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return table; } private Partition createPartition(List values, List partCols) throws Exception { - Partition partition = new PartitionBuilder() + new PartitionBuilder() .setDbName(DB_NAME) .setTableName(TABLE_NAME) .setValues(values) .setCols(partCols) - .build(); - client.add_partition(partition); - partition = client.getPartition(DB_NAME, TABLE_NAME, values); + .addToTable(client, metaStore.getConf()); + Partition partition = client.getPartition(DB_NAME, TABLE_NAME, values); return partition; } private Partition createPartition(String tableName, String location, List values, List partCols, Map partParams) throws Exception { - Partition partition = new PartitionBuilder() + new PartitionBuilder() .setDbName(DB_NAME) .setTableName(tableName) 
.setValues(values) .setCols(partCols) .setLocation(location) .setPartParams(partParams) - .build(); - client.add_partition(partition); - partition = client.getPartition(DB_NAME, tableName, values); + .addToTable(client, metaStore.getConf()); + Partition partition = client.getPartition(DB_NAME, tableName, values); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java index 5a7c71c109..473b17122f 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; -import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -1162,10 +1161,9 @@ public void testExchangePartitionNoPartExistsYearAndMonthSet() throws Exception // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .create(client, metaStore.getConf()); } private Table createSourceTable() throws Exception { @@ -1186,14 +1184,13 @@ private Table createTable(String dbName, String tableName, List par private Table createTable(String dbName, String tableName, List partCols, List cols, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(dbName) .setTableName(tableName) .setCols(cols) .setPartCols(partCols) .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } @@ -1244,7 +1241,7 @@ private Partition buildPartition(Table table, List values, String locati .addStorageDescriptorParam("test_exch_sd_param_key", "test_exch_sd_param_value") .setCols(getYearMonthAndDayPartCols()) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java new file mode 100644 index 0000000000..d8192b10ac --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java @@ -0,0 +1,535 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLForeignKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestForeignKey extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_fk_other_database"; + private static final String OTHER_CATALOG = "test_fk_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_fk_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[4]; + private Database inOtherCatalog; + + public TestForeignKey(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + 
client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[3] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_4") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table parentTable = testTables[1]; + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + ForeignKeysRequest rqst = + new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(table.getTableName() + "_to_" + parentTable.getTableName() + + "_foreign_key", fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + 
client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void createGetDrop2Column() throws TException { + Table parentTable = testTables[1]; + Table table = testTables[0]; + String constraintName = "2colfk"; + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .addColumn("col2") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(), table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(2, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals("col2", fetched.get(1).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals("col2", fetched.get(1).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void inOtherCatalog() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[3]; + String constraintName = "othercatfk"; + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(), 
table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void createTableWithConstraints() throws TException { + String constraintName = "ctwckk"; + Table parentTable = testTables[0]; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .setDbName(parentTable.getDbName()) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, fk, null, null, null, null); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + } + + @Test + public void 
createTableWithConstraintsInOtherCatalog() throws TException { + String constraintName = "ctwcocfk"; + Table parentTable = testTables[2]; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, fk, null, null, null, null); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + } + + @Test(expected = MetaException.class) + public void noSuchPk() throws TException { + List pk = new SQLPrimaryKeyBuilder() + .onTable(testTables[1]) + .addColumn("col1") + .build(metaStore.getConf()); + // Don't actually create the key + List fk = new SQLForeignKeyBuilder() + .onTable(testTables[0]) + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } + + @Test + public void addNoSuchTable() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchDb() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName(testTables[0].getTableName()) + .setDbName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchCatalog() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() + 
.onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName(testTables[0].getTableName()) + .setDbName(testTables[0].getDbName()) + .setCatName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void foreignKeyAcrossCatalogs() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[0]; + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java index d504f34321..9857c4ea67 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java @@ -19,8 +19,11 @@ package org.apache.hadoop.hive.metastore.client; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; @@ -30,10 +33,12 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -43,7 +48,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. Testing the Function related functions. 
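A note on the recurring builder change in these test diffs: build() now takes a Configuration so that unset defaults such as the catalog name, database, and owner can be resolved, and the new create(client, conf) convenience builds the object and persists it through the client in one step. A minimal sketch of the two idioms, assuming an open IMetaStoreClient client and a Configuration conf (both names illustrative, not from the patch):

    // Construct only; unset fields (catalog, db, owner) are defaulted from conf.
    // The object still has to be persisted explicitly.
    Function fn = new FunctionBuilder()
        .setName("example_fn")
        .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper")
        .build(conf);
    client.createFunction(fn);

    // Construct and persist in one call, replacing the build() + createFunction() pair.
    new FunctionBuilder()
        .setName("example_fn2")
        .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper")
        .create(client, conf);

The one-step create(client, conf) form is what most of the rewritten setUp() methods in these files use.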
@@ -82,27 +91,27 @@ public void setUp() throws Exception { .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) - .build(); + .build(metaStore.getConf()); testFunctions[1] = new FunctionBuilder() .setDbName(DEFAULT_DATABASE) .setName("test_function_to_find_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); testFunctions[2] = new FunctionBuilder() .setDbName(DEFAULT_DATABASE) .setName("test_function_hidden_1") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testFunctions[3] = new FunctionBuilder() .setDbName(OTHER_DATABASE) .setName("test_function_to_find_1") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); // Create the functions, and reload them from the MetaStore for(int i=0; i < testFunctions.length; i++) { @@ -125,7 +134,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a function and then drops it. Good for testing the happy path. - * @throws Exception */ @Test public void testCreateGetDeleteFunction() throws Exception { @@ -141,9 +149,7 @@ public void testCreateGetDeleteFunction() throws Exception { .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) - .build(); - - client.createFunction(function); + .create(client, metaStore.getConf()); Function createdFunction = client.getFunction(function.getDbName(), function.getFunctionName()); @@ -516,7 +522,7 @@ public void testAlterFunction() throws Exception { .setOwnerType(PrincipalType.GROUP) .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper2") .setFunctionType(FunctionType.JAVA) - .build(); + .build(metaStore.getConf()); client.alterFunction(testFunctions[0].getDbName(), testFunctions[0].getFunctionName(), newFunction); @@ -565,7 +571,7 @@ private Function getNewFunction() throws MetaException { return new FunctionBuilder() .setName("test_function_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); } @Test(expected = MetaException.class) @@ -797,7 +803,7 @@ public void testAlterFunctionCaseInsensitive() throws Exception { .setDbName(OTHER_DATABASE) .setName("test_function_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); Function originalFunction = testFunctions[1]; // Test in upper case @@ -832,4 +838,100 @@ public void testAlterFunctionCaseInsensitive() throws Exception { // Expected exception } } + + @Test + public void otherCatalog() throws TException { + String catName = "functions_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "functions_other_catalog_db"; + Database db = new DatabaseBuilder() + .setCatalogName(catName) + .setName(dbName) + .create(client, metaStore.getConf()); + + String functionName = "test_function"; + Function function = + new FunctionBuilder() + .inDb(db) + .setName(functionName) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + 
.setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, metaStore.getConf()); + + Function createdFunction = client.getFunction(catName, dbName, functionName); + // The createTime will be set on the server side, so the comparison should skip it + function.setCreateTime(createdFunction.getCreateTime()); + Assert.assertEquals("Comparing functions", function, createdFunction); + + String f2Name = "testy_function2"; + Function f2 = new FunctionBuilder() + .inDb(db) + .setName(f2Name) + .setClass(TEST_FUNCTION_CLASS) + .create(client, metaStore.getConf()); + + Set functions = new HashSet<>(client.getFunctions(catName, dbName, "test*")); + Assert.assertEquals(2, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertTrue(functions.contains(f2Name)); + + functions = new HashSet<>(client.getFunctions(catName, dbName, "test_*")); + Assert.assertEquals(1, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertFalse(functions.contains(f2Name)); + + client.dropFunction(function.getCatName(), function.getDbName(), function.getFunctionName()); + try { + client.getFunction(function.getCatName(), function.getDbName(), function.getFunctionName()); + Assert.fail("Expected a NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test(expected = NoSuchObjectException.class) + public void addNoSuchCatalog() throws TException { + String functionName = "test_function"; + new FunctionBuilder() + .setName(functionName) + .setCatName("nosuch") + .setDbName(DEFAULT_DATABASE_NAME) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + .setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void getNoSuchCatalog() throws TException { + client.getFunction("nosuch", DEFAULT_DATABASE_NAME, testFunctions[0].getFunctionName()); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNoSuchCatalog() throws TException { + client.dropFunction("nosuch", DEFAULT_DATABASE_NAME, testFunctions[0].getFunctionName()); + } + + @Test + public void getFunctionsNoSuchCatalog() throws TException { + List functionNames = client.getFunctions("nosuch", DEFAULT_DATABASE_NAME, "*"); + Assert.assertEquals(0, functionNames.size()); + } + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java index fe5060b4a3..80407284c0 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java @@ -18,15 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import 
java.util.List; +import java.util.Set; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -37,6 +44,7 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -45,6 +53,7 @@ import static junit.framework.TestCase.assertNotNull; import static junit.framework.TestCase.assertNull; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -86,16 +95,15 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) - throws Exception { + throws TException { TableBuilder builder = new TableBuilder() .setDbName(dbName) .setTableName(tableName) @@ -103,7 +111,7 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -113,29 +121,29 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List values) + private void addPartition(IMetaStoreClient client, Table table, List values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) - throws Exception { + private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) + throws TException { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), authOn); addPartition(client, t, Lists.newArrayList("1997", "05", "16")); } - private static void createTable3PartCols1Part(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1Part(IMetaStoreClient 
client) throws TException { createTable3PartCols1PartGeneric(client, false); } - private static void createTable3PartCols1PartAuthOn(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1PartAuthOn(IMetaStoreClient client) throws TException { createTable3PartCols1PartGeneric(client, true); } - private static List> createTable4PartColsParts(IMetaStoreClient client) throws + private List> createTable4PartColsParts(IMetaStoreClient client) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), false); @@ -167,7 +175,6 @@ private static void assertAuthInfoReturned(String user, String group, Partition /** * Testing getPartition(String,String,String) -> * get_partition_by_name(String,String,String). - * @throws Exception */ @Test public void testGetPartition() throws Exception { @@ -247,7 +254,6 @@ public void testGetPartitionNullPartName() throws Exception { /** * Testing getPartition(String,String,List(String)) -> * get_partition(String,String,List(String)). - * @throws Exception */ @Test public void testGetPartitionByValues() throws Exception { @@ -322,7 +328,6 @@ public void testGetPartitionByValuesNullValues() throws Exception { /** * Testing getPartitionsByNames(String,String,List(String)) -> * get_partitions_by_names(String,String,List(String)). - * @throws Exception */ @Test public void testGetPartitionsByNames() throws Exception { @@ -414,7 +419,6 @@ public void testGetPartitionsByNamesNullNames() throws Exception { /** * Testing getPartitionWithAuthInfo(String,String,List(String),String,List(String)) -> * get_partition_with_auth(String,String,List(String),String,List(String)). - * @throws Exception */ @Test public void testGetPartitionWithAuthInfoNoPrivilagesSet() throws Exception { @@ -516,5 +520,85 @@ public void testGetPartitionWithAuthInfoNullGroups() throws Exception { Lists.newArrayList("1997", "05", "16"), "user0", null); } + @Test + public void otherCatalog() throws TException { + String catName = "get_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "get_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition fetched = client.getPartition(catName, dbName, tableName, + Collections.singletonList("a0")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + fetched = client.getPartition(catName, dbName, tableName, "partcol=a0"); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + List fetchedParts = client.getPartitionsByNames(catName, dbName, tableName, + Arrays.asList("partcol=a0", "partcol=a1")); + Assert.assertEquals(2, fetchedParts.size()); + Set vals = new 
HashSet<>(fetchedParts.size()); + for (Partition part : fetchedParts) vals.add(part.getValues().get(0)); + Assert.assertTrue(vals.contains("a0")); + Assert.assertTrue(vals.contains("a1")); + + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartition("bogus", DB_NAME, TABLE_NAME, Lists.newArrayList("1997", "05", "16")); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionByNameBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartition("bogus", DB_NAME, TABLE_NAME, "yyyy=1997/mm=05/dd=16"); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionWithAuthBogusCatalog() throws TException { + createTable3PartCols1PartAuthOn(client); + client.getPartitionWithAuthInfo("bogus", DB_NAME, TABLE_NAME, + Lists.newArrayList("1997", "05", "16"), "user0", Lists.newArrayList("group0")); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionsByNamesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartitionsByNames("bogus", DB_NAME, TABLE_NAME, + Collections.singletonList("yyyy=1997/mm=05/dd=16")); + } + + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java index 4b22a7be48..d8448c8783 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java @@ -19,32 +19,38 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static java.util.stream.Collectors.toSet; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -103,12 +109,10 @@ public void tearDown() throws Exception { } - private Database createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + private void createDB(String dbName) throws TException { + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); - return db; + create(client, metaStore.getConf()); } @@ -122,12 +126,12 @@ private Table createTable(String dbName, String tableName, TableType type) .setType(type.name()); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (type == TableType.MATERIALIZED_VIEW) { CreationMetadata cm = new CreationMetadata( - dbName, tableName, ImmutableSet.of()); + MetaStoreUtils.getDefaultCatalog(metaStore.getConf()), dbName, tableName, ImmutableSet.of()); table.setCreationMetadata(cm); } @@ -156,21 +160,29 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ } private void assertTableMetas(int[] expected, List<TableMeta> actualTableMetas) { - assertEquals("Expected " + expected.length + " but have " + actualTableMetas.size() + - " tableMeta(s)", expected.length, actualTableMetas.size()); + assertTableMetas(expectedMetas, actualTableMetas, expected); + } + + private void assertTableMetas(List<TableMeta> actual, int... expected) { + assertTableMetas(expectedMetas, actual, expected); + } - Set<TableMeta> metas = actualTableMetas.stream().collect(toSet()); + private void assertTableMetas(List<TableMeta> fullExpected, List<TableMeta> actual, int... expected) { + assertEquals("Expected " + expected.length + " but have " + actual.size() + + " tableMeta(s)", expected.length, actual.size()); + + Set<TableMeta> metas = new HashSet<>(actual); for (int i : expected){ - assertTrue("Missing " + expectedMetas.get(i), metas.remove(expectedMetas.get(i))); + assertTrue("Missing " + fullExpected.get(i), metas.remove(fullExpected.get(i))); } assertTrue("Unexpected tableMeta(s): " + metas, metas.isEmpty()); + } /** * Testing getTableMeta(String,String,List(String)) -> * get_table_meta(String,String,List(String)). */ @Test public void testGetTableMeta() throws Exception { @@ -260,4 +272,55 @@ public void testGetTableMetaNullNoDbNoTbl() throws Exception { assertTableMetas(new int[]{}, tableMetas); } + @Test + public void tablesInDifferentCatalog() throws TException { + String catName = "get_table_meta_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db9"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = {"table_in_other_catalog_1", "table_in_other_catalog_2", "random_name"}; + List<TableMeta> expected = new ArrayList<>(tableNames.length); + for (int i = 0; i < tableNames.length; i++) { + client.createTable(new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("id", "int") + .addCol("name", "string") + .build(metaStore.getConf())); + expected.add(new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name())); + } + + List<String> types = Collections.singletonList(TableType.MANAGED_TABLE.name()); + List<TableMeta> actual = client.getTableMeta(catName, dbName, "*", types); + assertTableMetas(expected, actual, 0, 1, 2); + + actual = client.getTableMeta(catName, "*", "table_*", types); + assertTableMetas(expected, actual, 0, 1); + + actual = client.getTableMeta(dbName, "table_in_other_catalog_*", types); + assertTableMetas(expected, actual); + } + + @Test + public void noSuchCatalog() throws TException { + List<TableMeta> tableMetas = client.getTableMeta("nosuchcatalog", "*", "*", Lists.newArrayList()); + Assert.assertEquals(0, tableMetas.size()); + }
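+ + // getTableMeta treats the catalog argument as an exact name, not a pattern: + // "h*" matches no catalog (not even the default "hive" catalog), so the call + // below should come back empty.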
+ + @Test + public void catalogPatternsDontWork() throws TException { + List tableMetas = client.getTableMeta("h*", "*", "*", Lists.newArrayList()); + Assert.assertEquals(0, tableMetas.size()); + } + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java index f5e4b8e906..a8b6e316da 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java @@ -19,10 +19,14 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -33,9 +37,11 @@ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; import org.apache.hadoop.hive.metastore.api.PartitionValuesRow; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -45,6 +51,8 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -96,22 +104,21 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols) throws Exception { return createTestTable(client, dbName, tableName, partCols, false); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) - throws Exception { + throws TException { TableBuilder builder = new TableBuilder() .setDbName(dbName) .setTableName(tableName) @@ -119,7 +126,7 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -129,25 +136,25 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List values) + private void addPartition(IMetaStoreClient client, Table table, List values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) - throws Exception { + private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) + throws TException { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), authOn); addPartition(client, t, Lists.newArrayList("1997", "05", "16")); } - private static void createTable3PartCols1Part(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1Part(IMetaStoreClient client) throws TException { createTable3PartCols1PartGeneric(client, false); } - private static List> createTable4PartColsPartsGeneric(IMetaStoreClient client, + private List> createTable4PartColsPartsGeneric(IMetaStoreClient client, boolean authOn) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), @@ -165,12 +172,12 @@ private static void createTable3PartCols1Part(IMetaStoreClient client) throws Ex return testValues; } - private static List> createTable4PartColsParts(IMetaStoreClient client) throws + private List> createTable4PartColsParts(IMetaStoreClient client) throws Exception { return createTable4PartColsPartsGeneric(client, false); } - private static List> createTable4PartColsPartsAuthOn(IMetaStoreClient client) throws + private List> createTable4PartColsPartsAuthOn(IMetaStoreClient client) throws Exception { return createTable4PartColsPartsGeneric(client, true); } @@ -236,7 +243,6 @@ private static void assertCorrectPartitionValuesResponse(List> test /** * Testing listPartitions(String,String,short) -> * get_partitions(String,String,short). 
- * @throws Exception */ @Test public void testListPartitionsAll() throws Exception { @@ -247,8 +253,11 @@ public void testListPartitionsAll() throws Exception { partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)1); assertPartitionsHaveCorrectValues(partitions, testValues.subList(0, 1)); - partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)0); - assertTrue(partitions.isEmpty()); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) 0); + assertTrue(partitions.isEmpty()); + } } @@ -293,7 +302,8 @@ public void testListPartitionsAllNoTblName() throws Exception { public void testListPartitionsAllNullTblName() throws Exception { try { createTable3PartCols1Part(client); - List partitions = client.listPartitions(DB_NAME, null, (short)-1); + List partitions = client.listPartitions(DB_NAME, + (String)null, (short)-1); fail("Should have thrown exception"); } catch (NullPointerException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types @@ -316,7 +326,6 @@ public void testListPartitionsAllNullDbName() throws Exception { /** * Testing listPartitions(String,String,List(String),short) -> * get_partitions(String,String,List(String),short). - * @throws Exception */ @Test public void testListPartitionsByValues() throws Exception { @@ -388,7 +397,7 @@ public void testListPartitionsByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionsByValuesNullValues() throws Exception { createTable3PartCols1Part(client); - client.listPartitions(DB_NAME, TABLE_NAME, null, (short)-1); + client.listPartitions(DB_NAME, TABLE_NAME, (List)null, (short)-1); } @@ -396,7 +405,6 @@ public void testListPartitionsByValuesNullValues() throws Exception { /** * Testing listPartitionSpecs(String,String,int) -> * get_partitions_pspec(String,String,int). - * @throws Exception */ @Test public void testListPartitionSpecs() throws Exception { @@ -408,8 +416,11 @@ public void testListPartitionSpecs() throws Exception { partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 2); assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 2)); - partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 0); - assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 0)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 0); + assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 0)); + } } @Test(expected = NoSuchObjectException.class) @@ -447,7 +458,7 @@ public void testListPartitionSpecsNullDbName() throws Exception { createTable4PartColsParts(client); client.listPartitionSpecs(null, TABLE_NAME, -1); fail("Should have thrown exception"); - } catch (NullPointerException | TTransportException e) { + } catch (MetaException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -468,7 +479,6 @@ public void testListPartitionSpecsNullTblName() throws Exception { /** * Testing listPartitionsWithAuthInfo(String,String,short,String,List(String)) -> * get_partitions_with_auth(String,String,short,String,List(String)). 
- * @throws Exception */ @Test public void testListPartitionsWithAuth() throws Exception { @@ -539,9 +549,10 @@ public void testListPartitionsWithAuthNullDbName() throws Exception { public void testListPartitionsWithAuthNullTblName() throws Exception { try { createTable4PartColsParts(client); - client.listPartitionsWithAuthInfo(DB_NAME, null, (short)-1, "", Lists.newArrayList()); + client.listPartitionsWithAuthInfo(DB_NAME, (String)null, (short)-1, "", + Lists.newArrayList()); fail("Should have thrown exception"); - } catch (AssertionError| TTransportException e) { + } catch (MetaException| TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -563,7 +574,6 @@ public void testListPartitionsWithAuthNullGroup() throws Exception { /** * Testing listPartitionsWithAuthInfo(String,String,List(String),short,String,List(String)) -> * get_partitions_ps_with_auth(String,String,List(String),short,String,List(String)). - * @throws Exception */ @Test public void testListPartitionsWithAuthByValues() throws Exception { @@ -692,7 +702,7 @@ public void testListPartitionsWithAuthByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionsWithAuthByValuesNullValues() throws Exception { createTable4PartColsParts(client); - client.listPartitionsWithAuthInfo(DB_NAME, TABLE_NAME, null, + client.listPartitionsWithAuthInfo(DB_NAME, TABLE_NAME, (List)null, (short)-1, "", Lists.newArrayList()); } @@ -717,7 +727,6 @@ public void testListPartitionsWithAuthByValuesNullGroup() throws Exception { /** * Testing listPartitionsByFilter(String,String,String,short) -> * get_partitions_by_filter(String,String,String,short). - * @throws Exception */ @Test public void testListPartitionsByFilter() throws Exception { @@ -736,9 +745,12 @@ public void testListPartitionsByFilter() throws Exception { "yyyy=\"2017\" OR " + "mm=\"02\"", (short)0); assertTrue(partitions.isEmpty()); - partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, - "yYyY=\"2017\"", (short)-1); - assertPartitionsHaveCorrectValues(partitions, partValues.subList(2, 4)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, + "yYyY=\"2017\"", (short) -1); + assertPartitionsHaveCorrectValues(partitions, partValues.subList(2, 4)); + } partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\"", (short)-1); @@ -822,7 +834,6 @@ public void testListPartitionsByFilterEmptyFilter() throws Exception { /** * Testing listPartitionSpecsByFilter(String,String,String,int) -> * get_part_specs_by_filter(String,String,String,int). 
- * @throws Exception */ @Test public void testListPartitionsSpecsByFilter() throws Exception { @@ -844,9 +855,12 @@ public void testListPartitionsSpecsByFilter() throws Exception { "yyyy=\"20177\"", -1); assertPartitionsSpecProxy(partSpecProxy, Lists.newArrayList()); - partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, - "yYyY=\"2017\"", -1); - assertPartitionsSpecProxy(partSpecProxy, testValues.subList(2, 4)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, + "yYyY=\"2017\"", -1); + assertPartitionsSpecProxy(partSpecProxy, testValues.subList(2, 4)); + } partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\"", -1); @@ -919,7 +933,6 @@ public void testListPartitionSpecsByFilterEmptyFilter() throws Exception { /** * Testing getNumPartitionsByFilter(String,String,String) -> * get_num_partitions_by_filter(String,String,String). - * @throws Exception */ @Test public void testGetNumPartitionsByFilter() throws Exception { @@ -934,8 +947,11 @@ public void testGetNumPartitionsByFilter() throws Exception { n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"20177\""); assertEquals(0, n); - n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yYyY=\"2017\""); - assertEquals(2, n); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yYyY=\"2017\""); + assertEquals(2, n); + } n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\""); assertEquals(0, n); @@ -977,7 +993,7 @@ public void testGetNumPartitionsByFilterNullTblName() throws Exception { createTable4PartColsParts(client); client.getNumPartitionsByFilter(DB_NAME, null, "yyyy=\"2017\""); fail("Should have thrown exception"); - } catch (AssertionError | TTransportException e) { + } catch (MetaException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -1000,7 +1016,6 @@ public void testGetNumPartitionsByFilterNullFilter() throws Exception { /** * Testing listPartitionNames(String,String,short) -> * get_partition_names(String,String,short). - * @throws Exception */ @Test public void testListPartitionNames() throws Exception { @@ -1061,7 +1076,7 @@ public void testListPartitionNamesNullDbName() throws Exception { public void testListPartitionNamesNullTblName() throws Exception { try { createTable4PartColsParts(client); - client.listPartitionNames(DB_NAME, null, (short)-1); + client.listPartitionNames(DB_NAME, (String)null, (short)-1); fail("Should have thrown exception"); } catch (NullPointerException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types @@ -1073,7 +1088,6 @@ public void testListPartitionNamesNullTblName() throws Exception { /** * Testing listPartitionNames(String,String,List(String),short) -> * get_partition_names_ps(String,String,List(String),short). 
- * @throws Exception */ @Test public void testListPartitionNamesByValues() throws Exception { @@ -1175,7 +1189,7 @@ public void testListPartitionNamesByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionNamesByValuesNullValues() throws Exception { createTable4PartColsParts(client); - client.listPartitionNames(DB_NAME, TABLE_NAME, null, (short)-1); + client.listPartitionNames(DB_NAME, TABLE_NAME, (List)null, (short)-1); } @@ -1183,7 +1197,6 @@ public void testListPartitionNamesByValuesNullValues() throws Exception { /** * Testing listPartitionValues(PartitionValuesRequest) -> * get_partition_values(PartitionValuesRequest). - * @throws Exception */ @Test public void testListPartitionValues() throws Exception { @@ -1319,4 +1332,131 @@ public void testListPartitionValuesNullRequest() throws Exception { } } + @Test + public void otherCatalog() throws TException { + String catName = "list_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "list_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + List fetched = client.listPartitions(catName, dbName, tableName, -1); + Assert.assertEquals(parts.length, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + fetched = client.listPartitions(catName, dbName, tableName, + Collections.singletonList("a0"), -1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + PartitionSpecProxy proxy = client.listPartitionSpecs(catName, dbName, tableName, -1); + Assert.assertEquals(parts.length, proxy.size()); + Assert.assertEquals(catName, proxy.getCatName()); + + fetched = client.listPartitionsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + proxy = client.listPartitionSpecsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, proxy.size()); + Assert.assertEquals(catName, proxy.getCatName()); + + Assert.assertEquals(1, client.getNumPartitionsByFilter(catName, dbName, tableName, + "partcol=\"a0\"")); + + List names = client.listPartitionNames(catName, dbName, tableName, 57); + Assert.assertEquals(parts.length, names.size()); + + names = client.listPartitionNames(catName, dbName, tableName, Collections.singletonList("a0"), + Short.MAX_VALUE + 1); + Assert.assertEquals(1, names.size()); + + PartitionValuesRequest rqst = new PartitionValuesRequest(dbName, + tableName, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + rqst.setCatName(catName); + PartitionValuesResponse rsp = client.listPartitionValues(rqst); + Assert.assertEquals(5, rsp.getPartitionValuesSize()); + } + + @Test(expected = NoSuchObjectException.class) + public void 
listPartitionsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitions("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsWithPartialValuesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitions("bogus", DB_NAME, TABLE_NAME, Collections.singletonList("a0"), -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsSpecsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionSpecs("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\"", -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionSpecsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionSpecsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\"", -1); + } + + @Test(expected = NoSuchObjectException.class) + public void getNumPartitionsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getNumPartitionsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\""); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionNamesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionNames("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionNamesPartialValsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionNames("bogus", DB_NAME, TABLE_NAME, Collections.singletonList("a0"), -1); + } + + @Test(expected = MetaException.class) + public void listPartitionValuesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + PartitionValuesRequest rqst = new PartitionValuesRequest(DB_NAME, + TABLE_NAME, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + rqst.setCatName("bogus"); + client.listPartitionValues(rqst); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java new file mode 100644 index 0000000000..0c75295605 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLNotNullConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestNotNullConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestNotNullConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put 
in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no constraint returns an empty list + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List<SQLNotNullConstraint> fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed not null constraint in default catalog and database + List<SQLNotNullConstraint> nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", fetched.get(0).getNn_name()); + String table0NnName = fetched.get(0).getNn_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop the not null constraint + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0NnName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addNotNullConstraint(nn); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List<SQLNotNullConstraint> nn = new SQLNotNullConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List<SQLNotNullConstraint> fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); +
Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcuc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, nn, null, null); + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, nn, null, null); + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", fetched.get(0).getNn_name()); + String tablePkName = fetched.get(0).getNn_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + 
Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddNotNullConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + try { + nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List nn = new SQLNotNullConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest("nosuch", testTables[0].getDbName(), testTables[0].getTableName()); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } +} + diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java new file mode 100644 index 0000000000..52f5b93e8a --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java @@ -0,0 +1,465 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestPrimaryKey extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_constraints_other_database"; + private static final String OTHER_CATALOG = "test_constraints_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_constraints_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestPrimaryKey(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the catalog directory + 
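// (with no location set, the metastore derives one under the catalog's + // warehouse directory) +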
inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + String table0PkName = fetched.get(0).getPk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addPrimaryKey(pk); + } + + @Test + public void createGetDrop2Column() throws TException { + // Make sure get on a table with no key returns empty list + Table table = testTables[1]; + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + String constraintName = "cgd2cpk"; + // Multi-column. 
Also covers table in non-default database + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .addColumn("col2") + .setEnable(false) + .setConstraintName(constraintName) + .setValidate(true) + .setRely(true) + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(2, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("col2", fetched.get(1).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(2, fetched.get(1).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertEquals(fetched.get(0).getPk_name(), fetched.get(1).getPk_name()); + Assert.assertFalse(fetched.get(0).isEnable_cstr()); + Assert.assertTrue(fetched.get(0).isValidate_cstr()); + Assert.assertTrue(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a named primary key + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addPrimaryKey(pk); + } + + @Test + public void inOtherCatalog() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + String constraintName = "ocpk"; + // Table in non 'hive' catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcpk"; + Table table = new TableBuilder() + 
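// nothing sets a catalog or database here, so build(metaStore.getConf()) fills + // in the defaults and the table (and its key) land in the default catalog +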
.setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, pk, null, null, null, null, null); + PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, pk, null, null, null, null, null); + PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + String tablePkName = fetched.get(0).getPk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddPrimaryKey() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + 
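// no setConstraintName(), so the server will generate "<table>_primary_key" + // (asserted in createGetDrop above) +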
List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + Assert.fail(); + } catch (MetaException e) { + Assert.assertTrue(e.getMessage().contains("Primary key already exists for")); + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List pk = new SQLPrimaryKeyBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(DEFAULT_DATABASE_NAME, "nosuch"); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest("nosuch", testTables[0].getTableName()); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(testTables[0].getTableName(), testTables[0].getTableName()); + rqst.setCatName("nosuch"); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void dropNoSuchConstraint() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), testTables[0].getDbName(), + testTables[0].getTableName(), "nosuch"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + + } + + @Test + public void dropNoSuchTable() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), testTables[0].getDbName(), + "nosuch", "mypk"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void dropNoSuchDatabase() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), "nosuch", + testTables[0].getTableName(), "mypk"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void dropNoSuchCatalog() throws TException { + try { + client.dropConstraint("nosuch", testTables[0].getDbName(), + testTables[0].getTableName(), "nosuch"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + // TODO no fk across catalogs +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 61ac483d44..fe2d7587f6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -20,9 +20,15 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import 
org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -34,11 +40,15 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -50,10 +60,20 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. 
Testing the Table related functions for metadata @@ -78,7 +98,7 @@ public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService m public static void startMetaStores() { Map msConf = new HashMap(); // Enable trash, so it can be tested - Map extraConf = new HashMap(); + Map extraConf = new HashMap<>(); extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) startMetaStores(msConf, extraConf); @@ -101,74 +121,62 @@ public void setUp() throws Exception { testTables[0] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_view") .addCol("test_col", "int") .setType("VIRTUAL_VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_partitioned_table") .addCol("test_col1", "int") .addCol("test_col2", "int") .addPartCol("test_part_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("external_table_for_test") .addCol("test_col", "int") .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir") .addTableParam("EXTERNAL", "TRUE") .setType("EXTERNAL_TABLE") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Create partitions for the partitioned table for(int i=0; i < 3; i++) { - Partition partition = - new PartitionBuilder() - .fromTable(testTables[3]) + new PartitionBuilder() + .inTable(testTables[3]) .addValue("a" + i) - .build(); - client.add_partition(partition); + .addToTable(client, metaStore.getConf()); } // Add data files to the partitioned table List partitions = client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } @@ -177,7 +185,7 @@ public void setUp() throws Exception { testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); if (testTables[i].getPartitionKeys().isEmpty()) { if (testTables[i].getSd().getLocation() != null) { - Path dataFile = new Path(testTables[i].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } } @@ -199,7 +207,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a table and then drops it. 
Good for testing the happy path - * @throws Exception */ @Test public void testCreateGetDeleteTable() throws Exception { @@ -237,7 +244,7 @@ public void testCreateGetDeleteTable() throws Exception { public void testCreateTableDefaultValues() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -309,7 +316,7 @@ public void testCreateTableDefaultValues() throws Exception { public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(OTHER_DATABASE); table.setTableName("test_table_2"); @@ -329,7 +336,7 @@ public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception public void testCreateTableDefaultValuesView() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -343,7 +350,6 @@ public void testCreateTableDefaultValuesView() throws Exception { Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views - StorageDescriptor createdSd = createdTable.getSd(); Assert.assertNull("Storage descriptor location should be null", createdTable.getSd().getLocation()); } @@ -390,10 +396,9 @@ public void testCreateTableNullStorageDescriptor() throws Exception { private Table getNewTable() throws MetaException { return new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_with_invalid_sd") .addCol("test_col", "int") - .build(); + .build(metaStore.getConf()); } @Test(expected = MetaException.class) @@ -604,7 +609,7 @@ public void testDropTableExternalWithoutPurge() throws Exception { @Test public void testTruncateTableUnpartitioned() throws Exception { // Unpartitioned table - Path dataFile = new Path(testTables[0].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile"); client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null); Assert.assertTrue("Location should exist", metaStore.isPathExists(new Path(testTables[0].getSd().getLocation()))); @@ -615,7 +620,7 @@ public void testTruncateTableUnpartitioned() throws Exception { @Test public void testTruncateTablePartitioned() throws Exception { // Partitioned table - delete specific partitions a0, a2 - List partitionsToDelete = new ArrayList(); + List partitionsToDelete = new ArrayList<>(); partitionsToDelete.add("test_part_col=a0"); partitionsToDelete.add("test_part_col=a2"); client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(), @@ -626,7 +631,7 @@ public void testTruncateTablePartitioned() throws Exception { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); if (partition.getValues().contains("a0") || partition.getValues().contains("a2")) { // a0, a2 should be empty Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile)); @@ -648,7 +653,7 @@ 
public void testTruncateTablePartitionedDeleteAll() throws Exception { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); Assert.assertFalse("Every dataFile should be removed", metaStore.isPathExists(dataFile)); } } @@ -704,7 +709,7 @@ public void testAlterTableRename() throws Exception { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed @@ -731,7 +736,7 @@ public void testAlterTableChangingDatabase() throws Exception { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getDbName() + ".db/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed, other data should be the same @@ -755,7 +760,7 @@ public void testAlterTableExternalTable() throws Exception { Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -782,7 +787,7 @@ public void testAlterTableExternalTableChangeLocation() throws Exception { metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -833,6 +838,7 @@ public void testAlterTableChangeCols() throws Exception { Assert.assertEquals("The table data should be the same", newTable, alteredTable); } + @SuppressWarnings("deprecation") @Test public void testAlterTableCascade() throws Exception { Table originalTable = partitionedTable; @@ -1069,6 +1075,255 @@ public void testAlterTableAlreadyExists() throws Exception { } } + @Test + public void tablesInOtherCatalogs() throws TException, URISyntaxException { + String catName = "create_etc_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + 
.setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + // Make one have a non-standard location + if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])); + // Make one partitioned + if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME); + // Make one a materialized view + if (i == 3) { + builder.setType(TableType.MATERIALIZED_VIEW.name()) + .setRewriteEnabled(true) + .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); + } + client.createTable(builder.build(metaStore.getConf())); + } + + // Add partitions for the partitioned table + String[] partVals = new String[3]; + Table partitionedTable = client.getTable(catName, dbName, tableNames[2]); + for (int i = 0; i < partVals.length; i++) { + partVals[i] = "part" + i; + new PartitionBuilder() + .inTable(partitionedTable) + .addValue(partVals[i]) + .addToTable(client, metaStore.getConf()); + } + + // Get tables, make sure the locations are correct + for (int i = 0; i < tableNames.length; i++) { + Table t = client.getTable(catName, dbName, tableNames[i]); + Assert.assertEquals(catName, t.getCatName()); + String expectedLocation = (i < 1) ? + new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() + : + new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", + tableNames[i]).toURI().toString(); + + Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/"); + File dir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + } + + // Make sure getting table in the wrong catalog does not work + try { + Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // test getAllTables + Set fetchedNames = new HashSet<>(client.getAllTables(catName, dbName)); + Assert.assertEquals(tableNames.length, fetchedNames.size()); + for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName)); + + fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME)); + for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName)); + + // test getMaterializedViewsForRewriting + List materializedViews = client.getMaterializedViewsForRewriting(catName, dbName); + Assert.assertEquals(1, materializedViews.size()); + Assert.assertEquals(tableNames[3], materializedViews.get(0)); + + fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME)); + Assert.assertFalse(fetchedNames.contains(tableNames[3])); + + // test getTableObjectsByName + List
<Table>
fetchedTables = client.getTableObjectsByName(catName, dbName, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(2, fetchedTables.size()); + Collections.sort(fetchedTables); + Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName()); + Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName()); + + fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(0, fetchedTables.size()); + + // Test altering the table + Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + t.getParameters().put("test", "test"); + client.alter_table(catName, dbName, tableNames[0], t); + t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + Assert.assertEquals("test", t.getParameters().get("test")); + + // Alter a table in the wrong catalog + try { + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + Assert.fail(); + } catch (InvalidOperationException e) { + // NOP + } + + // Update the metadata for the materialized view + CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata(); + cm.addToTablesUsed(dbName + "." + tableNames[1]); + client.updateCreationMetadata(catName, dbName, tableNames[3], cm); + + List partNames = new ArrayList<>(); + for (String partVal : partVals) partNames.add("pcol1=" + partVal); + // Truncate a table + client.truncateTable(catName, dbName, tableNames[0], partNames); + + // Truncate a table in the wrong catalog + try { + client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Drop a table from the wrong catalog + try { + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Should ignore the failure + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true); + + // Have to do this in reverse order so that we drop the materialized view first. 
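+ // (Presumably the metastore would otherwise refuse to drop a table while the materialized view still references it.)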
+ for (int i = tableNames.length - 1; i >= 0; i--) { + t = client.getTable(catName, dbName, tableNames[i]); + File tableDir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + + if (tableNames[i].equalsIgnoreCase(tableNames[0])) { + client.dropTable(catName, dbName, tableNames[i], false, false); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + } else { + client.dropTable(catName, dbName, tableNames[i]); + Assert.assertFalse(tableDir.exists()); + } + } + Assert.assertEquals(0, client.getAllTables(catName, dbName).size()); + } + + @Test(expected = InvalidObjectException.class) + public void createTableInBogusCatalog() throws TException { + new TableBuilder() + .setCatName("nosuch") + .setTableName("doomed") + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void getTableInBogusCatalog() throws TException { + client.getTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName()); + } + + @Test + public void getAllTablesInBogusCatalog() throws TException { + List names = client.getAllTables("nosuch", testTables[0].getDbName()); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = UnknownDBException.class) + public void getTableObjectsByNameBogusCatalog() throws TException { + client.getTableObjectsByName("nosuch", testTables[0].getDbName(), + Arrays.asList(testTables[0].getTableName(), testTables[1].getTableName())); + } + + @Test + public void getMaterializedViewsInBogusCatalog() throws TException { + List names = client.getMaterializedViewsForRewriting("nosuch", DEFAULT_DATABASE_NAME); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = InvalidOperationException.class) + public void alterTableBogusCatalog() throws TException { + Table t = testTables[0].deepCopy(); + t.getParameters().put("a", "b"); + client.alter_table("nosuch", t.getDbName(), t.getTableName(), t); + } + + @Test(expected = InvalidOperationException.class) + public void moveTablesBetweenCatalogsOnAlter() throws TException { + String catName = "move_table_between_catalogs_on_alter"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "a_db"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "non_movable_table"; + Table before = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + Table after = before.deepCopy(); + after.setCatName(DEFAULT_CATALOG_NAME); + client.alter_table(catName, dbName, tableName, after); + + } + + @Test + public void truncateTableBogusCatalog() throws TException { + try { + List partNames = client.listPartitionNames(partitionedTable.getDbName(), + partitionedTable.getTableName(), (short) -1); + client.truncateTable("nosuch", partitionedTable.getDbName(), partitionedTable.getTableName(), + partNames); + Assert.fail(); // For reasons I don't understand and am too lazy to debug at the moment the + // NoSuchObjectException gets swallowed by a TApplicationException in remote mode. 
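+ // (Most likely because Thrift wraps exceptions that are not declared in the IDL method signature in a TApplicationException.)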
+ } catch (TApplicationException|NoSuchObjectException e) { + //NOP + } + } + + @Test(expected = NoSuchObjectException.class) + public void dropTableBogusCatalog() throws TException { + client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false); + } + /** * Creates a Table with all of the parameters set. The temporary table is available only on HS2 * server, so do not use it. @@ -1105,6 +1360,6 @@ private Table getTableWithAllParametersSet() throws MetaException { .addSerdeParam("serdeParam", "serdeParamValue") .addTableParam("tableParam", "tableParamValue") .addStorageDescriptorParam("sdParam", "sdParamValue") - .build(); + .build(metaStore.getConf()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java index a1716ce404..0de7f87bc6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -39,7 +45,11 @@ import org.junit.runners.Parameterized; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. 
Testing the Table related functions for metadata @@ -78,7 +88,7 @@ public void setUp() throws Exception { .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -86,14 +96,14 @@ public void setUp() throws Exception { .setTableName("test_view") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -101,39 +111,35 @@ public void setUp() throws Exception { .setTableName("test_table_to_find_2") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_hidden_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[6] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table_to_find_3") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -153,12 +159,12 @@ public void testGetTableCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - Table resultUpper = client.getTable(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase()); + Table resultUpper = client.getTable(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase()); Assert.assertEquals("Comparing tables", table, resultUpper); // Test in mixed case - Table resultMix = client.getTable("DeFaUlt", "tEsT_TabLE"); + Table resultMix = client.getTable("hIvE", "DeFaUlt", "tEsT_TabLE"); Assert.assertEquals("Comparing tables", table, resultMix); } @@ -222,7 +228,7 @@ public void testGetAllTables() throws Exception { } // Drop one table, see what remains - client.dropTable(testTables[1].getDbName(), testTables[1].getTableName()); + client.dropTable(testTables[1].getCatName(), testTables[1].getDbName(), testTables[1] .getTableName()); tables = client.getAllTables(DEFAULT_DATABASE); Assert.assertEquals("All tables size", 4, tables.size()); for(Table table : testTables) { @@ -274,7 +280,7 @@ public void testGetTables() throws Exception { Assert.assertEquals("No such table size", 0, tables.size()); // Look for tables without pattern - tables = client.getTables(DEFAULT_DATABASE, null); + tables = client.getTables(DEFAULT_DATABASE, (String)null); Assert.assertEquals("No such functions size", 5, tables.size()); // Look for tables with empty pattern @@ -305,8 +311,9 @@ public void testTableExists() throws Exception { // Using the second 
table, since a table called "test_table" exists in both databases Table table = testTables[1]; - Assert.assertTrue("Table exists", client.tableExists(table.getDbName(), table.getTableName())); - Assert.assertFalse("Table not exists", client.tableExists(table.getDbName(), + Assert.assertTrue("Table exists", client.tableExists(table.getCatName(), table.getDbName(), + table.getTableName())); + Assert.assertFalse("Table not exists", client.tableExists(table.getCatName(), table.getDbName(), "non_existing_table")); // No such database @@ -323,11 +330,11 @@ public void testTableExistsCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - Assert.assertTrue("Table exists", client.tableExists(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase())); + Assert.assertTrue("Table exists", client.tableExists(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase())); // Test in mixed case - Assert.assertTrue("Table exists", client.tableExists("DeFaUlt", "tEsT_TabLE")); + Assert.assertTrue("Table exists", client.tableExists("hIVe", "DeFaUlt", "tEsT_TabLE")); } @Test @@ -360,7 +367,7 @@ public void testTableExistsNullTableName() throws Exception { @Test public void testGetTableObjectsByName() throws Exception { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); tableNames.add(testTables[1].getTableName()); List
<Table>
tables = client.getTableObjectsByName(DEFAULT_DATABASE, tableNames); @@ -374,17 +381,17 @@ public void testGetTableObjectsByName() throws Exception { } // Test with empty array - tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList()); + tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList<>()); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add("no_such_table"); - client.getTableObjectsByName(testTables[0].getDbName(), tableNames); + client.getTableObjectsByName(testTables[0].getCatName(), testTables[0].getDbName(), tableNames); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists in the given database - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName(OTHER_DATABASE, tableNames); Assert.assertEquals("Found tables", 0, tables.size()); @@ -396,23 +403,24 @@ public void testGetTableObjectsByNameCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName().toUpperCase()); - List
<Table>
tables = client.getTableObjectsByName(table.getDbName().toUpperCase(), tableNames); + List
<Table>
tables = client.getTableObjectsByName(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); // Test in mixed case - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add("tEsT_TabLE"); - tables = client.getTableObjectsByName("DeFaUlt", tableNames); + tables = client.getTableObjectsByName("HiVe", "DeFaUlt", tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); } @Test(expected = UnknownDBException.class) public void testGetTableObjectsByNameNoSuchDatabase() throws Exception { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName("no_such_database", tableNames); @@ -421,7 +429,7 @@ public void testGetTableObjectsByNameNoSuchDatabase() throws Exception { @Test public void testGetTableObjectsByNameNullDatabase() throws Exception { try { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(OTHER_DATABASE); client.getTableObjectsByName(null, tableNames); @@ -448,4 +456,55 @@ public void testGetTableObjectsByNameNullTableNameList() throws Exception { // Expected exception - Remote MetaStore } } + + // Tests for getTable in other catalogs are covered in TestTablesCreateDropAlterTruncate. + @Test + public void otherCatalog() throws TException { + String catName = "get_exists_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + Set tables = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*")); + Assert.assertEquals(4, tables.size()); + for (String tableName : tableNames) Assert.assertTrue(tables.contains(tableName)); + + List fetchedNames = client.getTables(catName, dbName, "*_3"); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[3], fetchedNames.get(0)); + + Assert.assertTrue("Table exists", client.tableExists(catName, dbName, tableNames[0])); + Assert.assertFalse("Table not exists", client.tableExists(catName, dbName, "non_existing_table")); + } + + @Test + public void getTablesBogusCatalog() throws TException { + Assert.assertEquals(0, client.getTables("nosuch", DEFAULT_DATABASE_NAME, "*_to_find_*").size()); + } + + @Test + public void tableExistsBogusCatalog() throws TException { + Assert.assertFalse(client.tableExists("nosuch", testTables[0].getDbName(), + testTables[0].getTableName())); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java index 7e4a59f2ad..00e9104122 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -38,6 +44,8 @@ import java.util.List; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + /** * Test class for IMetaStoreClient API. Testing the Table related functions for metadata * querying like getting one, or multiple tables, and table name lists. @@ -78,7 +86,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -88,7 +96,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(2000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() @@ -98,7 +106,7 @@ public void setUp() throws Exception { .setOwner("Owner2") .setLastAccessTime(1000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -108,7 +116,7 @@ public void setUp() throws Exception { .setOwner("Owner3") .setLastAccessTime(3000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() @@ -118,16 +126,16 @@ public void setUp() throws Exception { .setOwner("Tester") .setLastAccessTime(2500) .addTableParam("param1", "value4") - .build(); + .create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("filter_test_table_5") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[6] = new TableBuilder() @@ -137,16 +145,12 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), 
testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -268,4 +272,45 @@ public void testListTableNamesByFilterNullFilter() throws Exception { public void testListTableNamesByFilterInvalidFilter() throws Exception { client.listTableNamesByFilter(DEFAULT_DATABASE, "invalid filter", (short)-1); } + + @Test + public void otherCatalogs() throws TException { + String catName = "list_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + if (i == 0) builder.addTableParam("the_key", "the_value"); + builder.create(client, metaStore.getConf()); + } + + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter(catName, dbName, filter, (short)-1); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[0], fetchedNames.get(0)); + } + + @Test(expected = UnknownDBException.class) + public void listTablesBogusCatalog() throws TException { + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter("", DEFAULT_DATABASE_NAME, + filter, (short)-1); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java new file mode 100644 index 0000000000..8eb18ece3e --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLUniqueConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestUniqueConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestUniqueConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in 
the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", fetched.get(0).getUk_name()); + String table0PkName = fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addUniqueConstraint(uc); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List uc = new SQLUniqueConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), 
fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcuc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", 
fetched.get(0).getUk_name()); + String tablePkName = fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + try { + uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List uc = new SQLUniqueConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java index f2c8fe4b82..709085d71f 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java @@ -166,4 +166,8 @@ public void cleanWarehouseDirs() throws MetaException { */ public void stop() { } + + public Configuration getConf() { + return configuration; + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java index 409ddc55ec..fa7057f83e 100644 --- 
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java index 409ddc55ec..fa7057f83e 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java @@ -99,7 +99,8 @@ public void testValidateSequences() throws Exception { // Test valid case String[] scripts = new String[] { "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into CTLGS values(37, 'mycat', 'my description', 'hdfs://tmp');", + "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; File scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -111,7 +112,7 @@ "delete from SEQUENCE_TABLE;", "delete from DBS;", "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -217,6 +218,7 @@ public void testSchemaInit() throws Exception { IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); + LOG.info("Starting testSchemaInit"); schemaTool.doInit(metastoreSchemaInfo.getHiveSchemaVersion()); schemaTool.verifySchemaVersion(); } @@ -296,11 +298,18 @@ public void testSchemaUpgrade() throws Exception { System.setOut(outPrintStream); // Upgrade schema from 1.2.0 to latest - schemaTool.doUpgrade("1.2.0"); + Exception caught = null; + try { + schemaTool.doUpgrade("1.2.0"); + } catch (Exception e) { + caught = e; + } LOG.info("stdout is " + stdout.toString()); LOG.info("stderr is " + stderr.toString()); + if (caught != null) Assert.fail(caught.getMessage()); + // Verify that the schemaTool ran pre-upgrade scripts and ignored errors Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript)); Assert.assertTrue(stderr.toString().contains("foo")); @@ -329,8 +338,9 @@ public void testValidateLocations() throws Exception { // Test valid case String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'mydescription', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", @@ -357,10 +367,10 @@ public void testValidateLocations() throws Exception { "delete from TBLS;", "delete from SDS;", "delete from DBS;", - "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role');", - "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role');", + "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role', 'mycat');", + "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", @@ -457,7 +467,8 @@ private String writeDummyPreUpgradeScript(int index, String upgradeScriptName, // Insert the records in DB to simulate a hive table private void createTestHiveTableSchemas() throws IOException { String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'my description', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into 
TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
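The fixture changes above all follow the same rule introduced by this patch: every DBS row now ends with a CTLG_NAME column, so a matching CTLGS row has to be seeded before any database that references it. A sketch of that pattern, using the same generateTestScript/runSqlLine helpers as the tests (the ids and names are illustrative):

  String[] scripts = new String[] {
      // Catalogs must exist before the databases that reference them.
      "insert into CTLGS values(1, 'mycat', 'example catalog', 'hdfs://myhost.com:8020/user/hive/warehouse');",
      // The trailing column of each DBS row is now the owning catalog's name.
      "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');"
  };
  File scriptFile = generateTestScript(scripts);
  schemaTool.runSqlLine(scriptFile.getPath());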
diff --git standalone-metastore/src/test/resources/log4j2.properties standalone-metastore/src/test/resources/log4j2.properties index db8a55005d..365687e1c9 100644 --- standalone-metastore/src/test/resources/log4j2.properties +++ standalone-metastore/src/test/resources/log4j2.properties @@ -8,64 +8,22 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. -status = INFO -name = MetastoreLog4j2 -packages = org.apache.hadoop.hive.metastore +name = PropertiesConfig +appenders = console -# list of properties -property.metastore.log.level = INFO -property.metastore.root.logger = DRFA -property.metastore.log.dir = ${sys:java.io.tmpdir}/${sys:user.name} -property.metastore.log.file = metastore.log -property.hive.perflogger.log.level = INFO - -# list of all appenders -appenders = console, DRFA - -# console appender appender.console.type = Console -appender.console.name = console -appender.console.target = SYSTEM_ERR +appender.console.name = STDOUT appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n - -# daily rolling file appender -appender.DRFA.type = RollingRandomAccessFile -appender.DRFA.name = DRFA -appender.DRFA.fileName = ${sys:metastore.log.dir}/${sys:metastore.log.file} -# Use %pid in the filePattern to append @ to the filename if you want separate log files for different CLI session -appender.DRFA.filePattern = ${sys:metastore.log.dir}/${sys:metastore.log.file}.%d{yyyy-MM-dd} -appender.DRFA.layout.type = PatternLayout -appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n -appender.DRFA.policies.type = Policies -appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy -appender.DRFA.policies.time.interval = 1 -appender.DRFA.policies.time.modulate = true -appender.DRFA.strategy.type = DefaultRolloverStrategy -appender.DRFA.strategy.max = 30 - -# list of all loggers -loggers = DataNucleus, Datastore, JPOX, PerfLogger - -logger.DataNucleus.name = DataNucleus -logger.DataNucleus.level = INFO - -logger.Datastore.name = Datastore -logger.Datastore.level = INFO - -logger.JPOX.name = JPOX -logger.JPOX.level = INFO +appender.console.layout.pattern = [%-5level] %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %c{1} - %msg%n -logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger -logger.PerfLogger.level = ${sys:hive.perflogger.log.level} -# root logger -rootLogger.level = ${sys:metastore.log.level} -rootLogger.appenderRefs = root -rootLogger.appenderRef.root.ref = ${sys:metastore.root.logger} +rootLogger.level = debug +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT
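The replacement test configuration is console-only: every category inherits the root logger, which writes to the STDOUT appender at debug level. A quick smoke check of that behavior — assuming slf4j over log4j2 on the test classpath, as the LOG fields in the tests above already imply; the class name is illustrative:

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class Log4j2ConfigSmokeTest {
    private static final Logger LOG = LoggerFactory.getLogger(Log4j2ConfigSmokeTest.class);

    public static void main(String[] args) {
      // Should print to stdout in the [LEVEL] timestamp [thread] Class - message pattern.
      LOG.debug("test log4j2.properties loaded");
    }
  }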